source "drivers/staging/most/Kconfig"
+source "drivers/staging/imgtec/Kconfig"
+
endif # STAGING
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_POWERVR_ROGUE_N) += imgtec/
--- /dev/null
+config POWERVR_ADF_FBDEV
+ tristate "ADF driver for fbdev-only systems"
+ depends on ADF
+ depends on FB
+ help
+ Driver for systems with only fbdev video drivers.
+
+ Say Y here if your SoC has a pre-existing fbdev driver, but
+ no native ADF driver. This driver will wrap the fbdev driver
+ to provide minimal compatibility with ADF.
+
+source "drivers/staging/imgtec/rogue/Kconfig"
+
+source "drivers/staging/imgtec/apollo/Kconfig"
--- /dev/null
+obj-$(CONFIG_POWERVR_ADF_FBDEV) += powervr_adf_fbdev.o
+powervr_adf_fbdev-y += adf_common.o adf_fbdev.o
+ccflags-y += \
+ -include $(srctree)/drivers/staging/imgtec/config_kernel.h \
+ -I$(srctree)/$(src) \
+ -I$(srctree)/drivers/staging/android
+
+obj-y += rogue/
+obj-y += apollo/
--- /dev/null
+/*************************************************************************/ /*!
+@File adf_ext.h
+@Title IMG extension ioctls and ioctl packages for ADF
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+#ifndef __ADF_EXT_H__
+#define __ADF_EXT_H__
+
+#include <drm/drm.h>
+
+#define ADF_BUFFER_TRANSFORM_NONE_EXT (0 << 0)
+#define ADF_BUFFER_TRANSFORM_FLIP_H_EXT (1 << 0)
+#define ADF_BUFFER_TRANSFORM_FLIP_V_EXT (1 << 1)
+#define ADF_BUFFER_TRANSFORM_ROT_90_EXT (1 << 2)
+#define ADF_BUFFER_TRANSFORM_ROT_180_EXT ((1 << 0) | (1 << 1))
+#define ADF_BUFFER_TRANSFORM_ROT_270_EXT ((1 << 0) | (1 << 1) | (1 << 2))
+
+#define ADF_BUFFER_BLENDING_NONE_EXT 0
+#define ADF_BUFFER_BLENDING_PREMULT_EXT 1
+#define ADF_BUFFER_BLENDING_COVERAGE_EXT 2
+
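+/* Per-buffer extension data passed alongside the standard adf_buffer_config
+ * (as post/validate custom data) to describe cropping, display placement,
+ * rotation/flip and blending.
+ */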
+struct adf_buffer_config_ext {
+ /* Crop applied to surface (BEFORE transformation) */
+ struct drm_clip_rect crop;
+
+ /* Region of screen to display surface in (AFTER scaling) */
+ struct drm_clip_rect display;
+
+ /* Surface rotation / flip / mirror */
+ __u32 transform;
+
+ /* Alpha blending mode e.g. none / premult / coverage */
+ __u32 blend_type;
+
+ /* Plane alpha */
+ __u8 plane_alpha;
+ __u8 reserved[3];
+} __packed;
+
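+/* Custom data block for a post/validate request: one adf_buffer_config_ext
+ * entry per buffer, in the same order as the buffer array.
+ */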
+struct adf_post_ext {
+ __u32 post_id;
+ struct adf_buffer_config_ext bufs_ext[];
+} __packed;
+
+struct adf_validate_config_ext {
+ __u32 n_interfaces;
+ __u32 __user *interfaces;
+
+ __u32 n_bufs;
+
+ struct adf_buffer_config __user *bufs;
+ struct adf_post_ext __user *post_ext;
+} __packed;
+
+#define ADF_IOCTL_NR_VALIDATE_IMG (ADF_IOCTL_NR_CUSTOM + 0)
+
+#define ADF_VALIDATE_CONFIG_EXT \
+ _IOW(ADF_IOCTL_TYPE, ADF_IOCTL_NR_VALIDATE_IMG, \
+ struct adf_validate_config_ext)
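+
+/* Illustrative (userspace) usage sketch -- adf_dev_fd, intf_id, buf_cfg and
+ * post_data below are placeholder names, not part of this API. A non-zero
+ * ioctl() return means the device rejected the proposed configuration:
+ *
+ *	struct adf_validate_config_ext cfg = {
+ *		.n_interfaces = 1,
+ *		.interfaces   = &intf_id,
+ *		.n_bufs       = 1,
+ *		.bufs         = &buf_cfg,
+ *		.post_ext     = post_data,
+ *	};
+ *	err = ioctl(adf_dev_fd, ADF_VALIDATE_CONFIG_EXT, &cfg);
+ */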
+
+#endif /* __ADF_EXT_H__ */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "adf_common.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/compat.h>
+#include <linux/bug.h>
+
+#include <video/adf_client.h>
+
+#ifdef DEBUG_VALIDATE
+#define val_dbg(dev, fmt, x...) dev_dbg(dev, fmt, x)
+#else
+#define val_dbg(dev, fmt, x...) do { } while (0)
+#endif
+
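+/* Copy the validate request from user space, look up the referenced
+ * interfaces and overlay engines, import the dma-bufs and hand the faked-up
+ * post to the driver's ->validate() hook. Any driver state produced is freed
+ * again immediately; this path only reports whether the configuration would
+ * be accepted.
+ */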
+static long validate(struct adf_device *dev,
+ struct adf_validate_config_ext __user *arg)
+{
+ struct adf_interface **intfs = NULL;
+ struct adf_validate_config_ext data;
+ struct adf_buffer *bufs = NULL;
+ struct adf_post post_cfg;
+ void *post_ext = NULL;
+ u32 post_ext_size;
+ void *driver_state;
+ int err = 0;
+ size_t i, j;
+
+ if (copy_from_user(&data, arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (data.n_interfaces > ADF_MAX_INTERFACES) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (data.n_bufs > ADF_MAX_BUFFERS) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ post_ext_size = sizeof(struct adf_post_ext) +
+ data.n_bufs * sizeof(struct adf_buffer_config_ext);
+
+ if (!access_ok(VERIFY_READ, data.bufs,
+ sizeof(*data.bufs) * data.n_bufs)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ post_ext = kmalloc(post_ext_size, GFP_KERNEL);
+ if (!post_ext) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (!access_ok(VERIFY_READ, data.post_ext, post_ext_size)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (copy_from_user(post_ext, data.post_ext, post_ext_size)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (data.n_interfaces) {
+ if (!access_ok(VERIFY_READ, data.interfaces,
+			       sizeof(*data.interfaces) * data.n_interfaces)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ intfs = kmalloc_array(data.n_interfaces, sizeof(intfs[0]),
+ GFP_KERNEL);
+ if (!intfs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ for (i = 0; i < data.n_interfaces; i++) {
+		u32 intf_id;
+
+		if (get_user(intf_id, &data.interfaces[i])) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ intfs[i] = idr_find(&dev->interfaces, intf_id);
+ if (!intfs[i]) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ if (data.n_bufs) {
+ bufs = kcalloc(data.n_bufs, sizeof(bufs[0]), GFP_KERNEL);
+ if (!bufs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ for (i = 0; i < data.n_bufs; i++) {
+ struct adf_buffer_config config;
+
+ if (copy_from_user(&config, &data.bufs[i], sizeof(config))) {
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ memset(&bufs[i], 0, sizeof(bufs[i]));
+
+ if (config.n_planes > ADF_MAX_PLANES) {
+ err = -EINVAL;
+ goto err_import;
+ }
+
+ bufs[i].overlay_engine = idr_find(&dev->overlay_engines,
+ config.overlay_engine);
+ if (!bufs[i].overlay_engine) {
+ err = -ENOENT;
+ goto err_import;
+ }
+
+ bufs[i].w = config.w;
+ bufs[i].h = config.h;
+ bufs[i].format = config.format;
+
+ for (j = 0; j < config.n_planes; j++) {
+ bufs[i].dma_bufs[j] = dma_buf_get(config.fd[j]);
+ if (IS_ERR_OR_NULL(bufs[i].dma_bufs[j])) {
+ err = PTR_ERR(bufs[i].dma_bufs[j]);
+ bufs[i].dma_bufs[j] = NULL;
+ goto err_import;
+ }
+ bufs[i].offset[j] = config.offset[j];
+ bufs[i].pitch[j] = config.pitch[j];
+ }
+ bufs[i].n_planes = config.n_planes;
+
+ bufs[i].acquire_fence = NULL;
+ }
+
+ /* Fake up a post configuration to validate */
+ post_cfg.custom_data_size = post_ext_size;
+ post_cfg.custom_data = post_ext;
+ post_cfg.n_bufs = data.n_bufs;
+ post_cfg.bufs = bufs;
+
+ /* Mapping dma bufs is too expensive for validate, and we don't
+ * need to do it at the moment.
+ */
+ post_cfg.mappings = NULL;
+
+ err = dev->ops->validate(dev, &post_cfg, &driver_state);
+ if (err)
+ goto err_import;
+
+ /* For the validate ioctl, we don't need the driver state. If it
+ * was allocated, free it immediately.
+ */
+ if (dev->ops->state_free)
+ dev->ops->state_free(dev, driver_state);
+
+err_import:
+ for (i = 0; i < data.n_bufs; i++)
+ for (j = 0; j < ARRAY_SIZE(bufs[i].dma_bufs); j++)
+ if (bufs[i].dma_bufs[j])
+ dma_buf_put(bufs[i].dma_bufs[j]);
+err_out:
+ kfree(post_ext);
+ kfree(intfs);
+ kfree(bufs);
+ return err;
+}
+
+static long adf_img_ioctl_validate(struct adf_device *dev,
+				   struct adf_validate_config_ext __user *arg)
+{
+ int err;
+
+ if (!access_ok(VERIFY_READ, arg, sizeof(*arg))) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ err = validate(dev, arg);
+err_out:
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+
+#define ADF_VALIDATE_CONFIG_EXT32 \
+ _IOW(ADF_IOCTL_TYPE, ADF_IOCTL_NR_VALIDATE_IMG, \
+ struct adf_validate_config_ext32)
+
+struct adf_validate_config_ext32 {
+ __u32 n_interfaces;
+ compat_uptr_t interfaces;
+
+ __u32 n_bufs;
+
+ compat_uptr_t bufs;
+ compat_uptr_t post_ext;
+} __packed;
+
+/* adf_validate_config_ext32 must map to the adf_validate_config_ext struct.
+ * Changes to struct adf_validate_config_ext will likely be needed to be
+ * mirrored in adf_validate_config_ext32, so put a sanity check here to try
+ * to notice if the size has changed from what's expected.
+ */
+
+static long adf_img_ioctl_validate_compat(struct adf_device *dev,
+				struct adf_validate_config_ext32 __user *arg_compat)
+{
+	struct adf_validate_config_ext32 data32;
+	struct adf_validate_config_ext data;
+	struct adf_validate_config_ext __user *arg;
+	int err = 0;
+
+	BUILD_BUG_ON_MSG(sizeof(struct adf_validate_config_ext) != 32,
+			 "adf_validate_config_ext has unexpected size");
+
+	/* Fetch the whole compat struct with copy_from_user() rather than
+	 * dereferencing the __user pointer directly.
+	 */
+	if (copy_from_user(&data32, arg_compat, sizeof(data32))) {
+		err = -EFAULT;
+		goto err_out;
+	}
+
+	data.n_interfaces = data32.n_interfaces;
+	data.interfaces = compat_ptr(data32.interfaces);
+	data.n_bufs = data32.n_bufs;
+	data.bufs = compat_ptr(data32.bufs);
+	data.post_ext = compat_ptr(data32.post_ext);
+
+	/* validate() re-reads the request from user space, so repack the
+	 * native-sized struct there rather than passing a kernel pointer.
+	 */
+	arg = compat_alloc_user_space(sizeof(*arg));
+	if (!arg || copy_to_user(arg, &data, sizeof(data))) {
+		err = -EFAULT;
+		goto err_out;
+	}
+
+	err = validate(dev, arg);
+err_out:
+	return err;
+}
+
+#endif /* CONFIG_COMPAT */
+
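+/* Custom ioctl handler to be plugged into the ADF object ops; only the IMG
+ * validate ioctl (and its compat variant) is handled here.
+ */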
+long adf_img_ioctl(struct adf_obj *obj, unsigned int cmd, unsigned long arg)
+{
+ struct adf_device *dev =
+ (struct adf_device *)obj->parent;
+
+ switch (cmd) {
+ case ADF_VALIDATE_CONFIG_EXT:
+ return adf_img_ioctl_validate(dev,
+ (struct adf_validate_config_ext __user *)arg);
+#ifdef CONFIG_COMPAT
+ case ADF_VALIDATE_CONFIG_EXT32:
+ return adf_img_ioctl_validate_compat(dev,
+ (struct adf_validate_config_ext32 __user *)
+ compat_ptr(arg));
+#endif
+ }
+
+ return -ENOTTY;
+}
+
+/* Callers of this function should have taken the dev->client_lock */
+
+static struct adf_interface *
+get_interface_attached_to_overlay(struct adf_device *dev,
+ struct adf_overlay_engine *overlay)
+{
+ struct adf_interface *interface = NULL;
+ struct adf_attachment_list *entry;
+
+ /* We are open-coding adf_attachment_list_to_array. We can't use the
+ * adf_device_attachments helper because it takes the client lock,
+ * which is already held for calls to validate.
+ */
+ list_for_each_entry(entry, &dev->attached, head) {
+ /* If there are multiple interfaces attached to an overlay,
+ * this will return the last.
+ */
+ if (entry->attachment.overlay_engine == overlay)
+ interface = entry->attachment.interface;
+ }
+
+ return interface;
+}
+
+int adf_img_validate_simple(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state)
+{
+ struct adf_post_ext *post_ext = cfg->custom_data;
+ struct adf_overlay_engine *overlay;
+ struct adf_interface *interface;
+ struct adf_buffer *buffer;
+ int i = 0;
+ struct device *device = dev->dev;
+ size_t expected_custom_data_size;
+
+ /* "Null" flip handling */
+ if (cfg->n_bufs == 0)
+ return 0;
+
+ expected_custom_data_size = sizeof(struct adf_post_ext)
+ + cfg->n_bufs * sizeof(struct adf_buffer_config_ext);
+ if (cfg->custom_data_size != expected_custom_data_size) {
+		val_dbg(device, "Custom data size %zu not expected size %zu.\n",
+			cfg->custom_data_size,
+			expected_custom_data_size);
+ return -EINVAL;
+ }
+
+ if (cfg->n_bufs != 1) {
+ val_dbg(device, "Got %zu buffers in post. Should be 1.\n",
+ cfg->n_bufs);
+ return -EINVAL;
+ }
+
+ buffer = &cfg->bufs[0];
+ overlay = buffer->overlay_engine;
+ if (!overlay) {
+ dev_err(device, "Buffer without an overlay engine.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < overlay->ops->n_supported_formats; i++) {
+ if (buffer->format == overlay->ops->supported_formats[i])
+ break;
+ }
+
+ if (i == overlay->ops->n_supported_formats) {
+ char req_format_str[ADF_FORMAT_STR_SIZE];
+
+ adf_format_str(buffer->format, req_format_str);
+
+ val_dbg(device, "Unsupported buffer format %s.\n",
+ req_format_str);
+ return -EINVAL;
+ }
+
+ interface = get_interface_attached_to_overlay(dev, overlay);
+ if (!interface) {
+ dev_err(device, "No interface attached to overlay\n");
+ return -EINVAL;
+ }
+
+ if (buffer->w != interface->current_mode.hdisplay) {
+ val_dbg(device, "Buffer width %u is not expected %u.\n",
+ buffer->w, interface->current_mode.hdisplay);
+ return -EINVAL;
+ }
+
+ if (buffer->h != interface->current_mode.vdisplay) {
+ val_dbg(device, "Buffer height %u is not expected %u.\n",
+ buffer->h, interface->current_mode.vdisplay);
+ return -EINVAL;
+ }
+
+ if (buffer->n_planes != 1) {
+ val_dbg(device, "Buffer n_planes %u is not 1.\n",
+ buffer->n_planes);
+ return -EINVAL;
+ }
+
+ if (buffer->offset[0] != 0) {
+ val_dbg(device, "Buffer offset %u is not 0.\n",
+ buffer->offset[0]);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cfg->n_bufs; i++) {
+ struct adf_buffer_config_ext *buf_ext = &post_ext->bufs_ext[i];
+ u16 hdisplay = interface->current_mode.hdisplay;
+ u16 vdisplay = interface->current_mode.vdisplay;
+
+ if (buf_ext->crop.x1 != 0 ||
+ buf_ext->crop.y1 != 0 ||
+ buf_ext->crop.x2 != hdisplay ||
+ buf_ext->crop.y2 != vdisplay) {
+ val_dbg(device, "Buffer crop {%u,%u,%u,%u} not expected {%u,%u,%u,%u}.\n",
+ buf_ext->crop.x1, buf_ext->crop.y1,
+ buf_ext->crop.x2, buf_ext->crop.y2,
+ 0, 0, hdisplay, vdisplay);
+
+ /* Userspace might be emulating a lower resolution */
+ if (buf_ext->crop.x2 > hdisplay ||
+ buf_ext->crop.y2 > vdisplay)
+ return -EINVAL;
+ }
+
+ if (buf_ext->display.x1 != 0 ||
+ buf_ext->display.y1 != 0 ||
+ buf_ext->display.x2 != hdisplay ||
+ buf_ext->display.y2 != vdisplay) {
+ val_dbg(device, "Buffer display {%u,%u,%u,%u} not expected {%u,%u,%u,%u}.\n",
+ buf_ext->display.x1, buf_ext->display.y1,
+ buf_ext->display.x2, buf_ext->display.y2,
+ 0, 0, hdisplay, vdisplay);
+
+ /* Userspace might be emulating a lower resolution */
+ if (buf_ext->display.x2 > hdisplay ||
+ buf_ext->display.y2 > vdisplay)
+ return -EINVAL;
+ }
+
+ if (buf_ext->transform != ADF_BUFFER_TRANSFORM_NONE_EXT) {
+ val_dbg(device, "Buffer transform 0x%x not expected transform 0x%x.\n",
+ buf_ext->transform,
+ ADF_BUFFER_TRANSFORM_NONE_EXT);
+ return -EINVAL;
+ }
+
+ if (buf_ext->blend_type != ADF_BUFFER_BLENDING_PREMULT_EXT &&
+ buf_ext->blend_type != ADF_BUFFER_BLENDING_NONE_EXT) {
+ val_dbg(device, "Buffer blend type %u not supported.\n",
+ buf_ext->blend_type);
+ return -EINVAL;
+ }
+
+ if (buf_ext->plane_alpha != 0xff) {
+ val_dbg(device, "Buffer plane alpha %u not expected plane alpha %u.\n",
+ buf_ext->plane_alpha, 0xff);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+bool adf_img_buffer_sanity_check(const struct adf_interface *intf,
+ const struct adf_buffer *buf,
+ const struct adf_buffer_config_ext *buf_ext)
+{
+ struct device *dev = intf->base.parent->dev;
+ int plane;
+
+ if (buf->w == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero width\n");
+ return false;
+ }
+ if (buf->h == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero height\n");
+ return false;
+ }
+ if (buf->format == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero format\n");
+ return false;
+ }
+	if (buf->pitch[0] == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero pitch\n");
+ return false;
+ }
+ if (buf->n_planes == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero plane count\n");
+ return false;
+ }
+ if (buf->overlay_engine == NULL) {
+ dev_err(dev, "Buffer sanity failed: NULL assigned overlay\n");
+ return false;
+ }
+
+ for (plane = 0; plane < buf->n_planes; plane++) {
+ if (buf->dma_bufs[plane] == NULL) {
+ dev_err(dev, "Buffer sanity failed: NULL dma buf for plane %d\n",
+ plane);
+ return false;
+ }
+ if (buf->pitch[plane] == 0) {
+ dev_err(dev, "Buffer sanity failed: Zero pitch for plane %d\n",
+ plane);
+ return false;
+ }
+ /* The offset may be zero, so we can't check that here */
+ }
+
+ if (buf_ext->crop.x1 >= buf_ext->crop.x2 ||
+ buf_ext->crop.y1 >= buf_ext->crop.y2) {
+ dev_err(dev, "Buffer sanity failed: Invalid crop rect (%d,%d)(%d,%d)\n",
+ buf_ext->crop.x1, buf_ext->crop.y1,
+ buf_ext->crop.x2, buf_ext->crop.y2);
+ return false;
+ }
+
+ if (buf_ext->crop.x1 > buf->w ||
+ buf_ext->crop.x2 > buf->w ||
+ buf_ext->crop.y1 > buf->h ||
+ buf_ext->crop.y2 > buf->h) {
+ dev_err(dev, "Buffer sanity failed: Crop rect (%d,%d)(%d,%d) outside of %dx%d source buffer\n",
+ buf_ext->crop.x1, buf_ext->crop.y1,
+ buf_ext->crop.x2, buf_ext->crop.y2,
+ buf->w, buf->h);
+ return false;
+ }
+
+ if (buf_ext->display.x1 >= buf_ext->display.x2 ||
+ buf_ext->display.y1 >= buf_ext->display.y2) {
+ dev_err(dev, "Buffer sanity failed: Invalid display rect (%d,%d)(%d,%d)\n",
+ buf_ext->display.x1, buf_ext->display.y1,
+ buf_ext->display.x2, buf_ext->display.y2);
+ return false;
+ }
+
+	if (buf_ext->display.x1 > intf->current_mode.hdisplay ||
+	    buf_ext->display.x2 > intf->current_mode.hdisplay ||
+	    buf_ext->display.y1 > intf->current_mode.vdisplay ||
+	    buf_ext->display.y2 > intf->current_mode.vdisplay) {
+		dev_err(dev, "Buffer sanity failed: Display rect (%d,%d)(%d,%d) outside of %dx%d current interface mode\n",
+			buf_ext->display.x1, buf_ext->display.y1,
+			buf_ext->display.x2, buf_ext->display.y2,
+			intf->current_mode.hdisplay,
+			intf->current_mode.vdisplay);
+		return false;
+	}
+
+ switch (buf_ext->transform) {
+ case ADF_BUFFER_TRANSFORM_NONE_EXT:
+ case ADF_BUFFER_TRANSFORM_FLIP_H_EXT:
+ case ADF_BUFFER_TRANSFORM_FLIP_V_EXT:
+ case ADF_BUFFER_TRANSFORM_ROT_90_EXT:
+ case ADF_BUFFER_TRANSFORM_ROT_180_EXT:
+ case ADF_BUFFER_TRANSFORM_ROT_270_EXT:
+ break;
+ default:
+ dev_err(dev, "Invalid transform 0x%x\n", buf_ext->transform);
+ return false;
+ }
+
+ switch (buf_ext->blend_type) {
+ case ADF_BUFFER_BLENDING_NONE_EXT:
+ case ADF_BUFFER_BLENDING_PREMULT_EXT:
+ case ADF_BUFFER_BLENDING_COVERAGE_EXT:
+ break;
+ default:
+ dev_err(dev, "Invalid blend type 0x%x\n", buf_ext->blend_type);
+ return false;
+ }
+ return true;
+}
+
+bool adf_img_rects_intersect(const struct drm_clip_rect *rect1,
+ const struct drm_clip_rect *rect2)
+{
+ if (rect1->x1 < rect2->x2 &&
+ rect1->x2 > rect2->x1 &&
+ rect1->y1 < rect2->y2 &&
+ rect1->y2 > rect2->y1)
+ return true;
+ return false;
+}
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <video/adf.h>
+#include <adf/adf_ext.h>
+
+long
+adf_img_ioctl(struct adf_obj *obj, unsigned int cmd, unsigned long arg);
+
+/* This validates a post config with a set of assumptions for simple display
+ * engines:
+ * - The config custom data is a struct adf_buffer_config_ext
+ * - There is a single interface with a single overlay attached
+ * - There is a single non-blended layer
+ * - There is a single full-screen buffer
+ * - The buffer is of a format supported by the overlay
+ */
+int
+adf_img_validate_simple(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state);
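+
+/* Example wiring (illustrative only -- my_post is a placeholder; see
+ * adf_fbdev.c for a real user that wraps this helper):
+ *
+ *	static const struct adf_device_ops my_dev_ops = {
+ *		.owner    = THIS_MODULE,
+ *		.validate = adf_img_validate_simple,
+ *		.post     = my_post,
+ *	};
+ */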
+
+/* This does a quick sanity check of the supplied buffer, returns true if it
+ * passes the sanity checks.
+ * The calling driver must still do any device-specific validation
+ * of the buffer arguments.
+ */
+bool
+adf_img_buffer_sanity_check(const struct adf_interface *intf,
+ const struct adf_buffer *buf,
+ const struct adf_buffer_config_ext *buf_ext);
+
+/* Returns true if the two clip rects intersect. */
+bool
+adf_img_rects_intersect(const struct drm_clip_rect *rect1,
+ const struct drm_clip_rect *rect2);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/console.h>
+#include <linux/dma-buf.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include <drm/drm_fourcc.h>
+
+#include <video/adf.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_client.h>
+
+#include <adf/adf_ext.h>
+
+/* for sync_fence_put */
+#include PVR_ANDROID_SYNC_HEADER
+
+#include "adf_common.h"
+
+#ifndef CONFIG_FB
+#error adf_fbdev needs Linux framebuffer support. Enable it in your kernel.
+#endif
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+/* NOTE: This is just an example of how to use adf. You should NOT use this
+ * module in a production environment. It is meaningless to layer adf
+ * on top of fbdev, as adf is more flexible than fbdev and adf itself
+ * provides fbdev emulation. Do not use this implementation generally!
+ */
+
+#define DRVNAME "adf_fbdev"
+
+#define FALLBACK_REFRESH_RATE 60
+#define FALLBACK_DPI 160
+
+#if defined(ADF_FBDEV_NUM_PREFERRED_BUFFERS)
+#define NUM_PREFERRED_BUFFERS ADF_FBDEV_NUM_PREFERRED_BUFFERS
+#else
+#define NUM_PREFERRED_BUFFERS 3
+#endif
+
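+/* Private data attached to each dma-buf exported from the fbdev carveout;
+ * it describes one fixed-size slot of the NUM_PREFERRED_BUFFERS available.
+ */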
+struct adf_fbdev_dmabuf {
+ struct sg_table sg_table;
+ size_t offset;
+ size_t length;
+ void *vaddr;
+
+ /* Used for cleanup of dmabuf private data */
+ spinlock_t *alloc_lock;
+ u8 *alloc_mask;
+ u8 id;
+};
+
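+/* ADF device wrapping the single registered fbdev (registered_fb[0]) */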
+struct adf_fbdev_device {
+ struct adf_device base;
+ struct fb_info *fb_info;
+ atomic_t refcount;
+};
+
+struct adf_fbdev_interface {
+ struct adf_interface base;
+ struct drm_mode_modeinfo fb_mode;
+ u16 width_mm, height_mm;
+ struct fb_info *fb_info;
+ spinlock_t alloc_lock;
+ u8 alloc_mask;
+};
+
+/* SIMPLE BUFFER MANAGER *****************************************************/
+
+/* Handle alloc/free from the fbdev carveout (fix.smem_start -> fix.smem_len)
+ * region. This simple allocator sets a bit in the alloc_mask when a buffer is
+ * owned by dmabuf. When the dmabuf ->release() is called, the alloc_mask bit
+ * is cleared and the adf_fbdev_dmabuf object is freed.
+ *
+ * Since dmabuf relies on sg_table/scatterlists, and hence struct page*, this
+ * code may have problems if your framebuffer uses memory that is not in the
+ * kernel's page tables.
+ */
+
+static struct adf_fbdev_dmabuf *
+adf_fbdev_alloc_buffer(struct adf_fbdev_interface *interface)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf;
+ struct scatterlist *sg;
+ size_t unitary_size;
+ struct page *page;
+ u32 offset = 0;
+ int i, err;
+ u32 id;
+
+ spin_lock(&interface->alloc_lock);
+
+ for (id = 0; id < NUM_PREFERRED_BUFFERS; id++) {
+ if (!(interface->alloc_mask & (1UL << id))) {
+ interface->alloc_mask |= (1UL << id);
+ break;
+ }
+ }
+
+ spin_unlock(&interface->alloc_lock);
+
+ if (id == NUM_PREFERRED_BUFFERS)
+ return ERR_PTR(-ENOMEM);
+
+ unitary_size = interface->fb_info->fix.line_length *
+ interface->fb_info->var.yres;
+
+	/* The per-buffer size must be a multiple of PAGE_SIZE because we are
+	 * about to describe it with an sg_table; this was already checked in
+	 * adf_fbdev_flip_possible(), so this should never trigger.
+	 */
+ BUG_ON((unitary_size % PAGE_SIZE) != 0);
+
+ fbdev_dmabuf = kmalloc(sizeof(*fbdev_dmabuf), GFP_KERNEL);
+ if (!fbdev_dmabuf)
+ return ERR_PTR(-ENOMEM);
+
+ err = sg_alloc_table(&fbdev_dmabuf->sg_table, unitary_size / PAGE_SIZE,
+ GFP_KERNEL);
+ if (err) {
+ kfree(fbdev_dmabuf);
+ return ERR_PTR(err);
+ }
+
+ /* Increment the reference count of this module as long as the
+	 * adf_fbdev_dmabuf object exists. This prevents this module from
+ * being unloaded if the buffer is passed around by dmabuf.
+ */
+	if (!try_module_get(THIS_MODULE)) {
+		pr_err("try_module_get(THIS_MODULE) failed");
+		sg_free_table(&fbdev_dmabuf->sg_table);
+		kfree(fbdev_dmabuf);
+		return ERR_PTR(-EFAULT);
+	}
+
+ fbdev_dmabuf->offset = id * unitary_size;
+ fbdev_dmabuf->length = unitary_size;
+ fbdev_dmabuf->vaddr = interface->fb_info->screen_base +
+ fbdev_dmabuf->offset;
+
+ for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
+ fbdev_dmabuf->sg_table.nents, i) {
+ page = vmalloc_to_page(fbdev_dmabuf->vaddr + offset);
+		if (!page) {
+			pr_err("Failed to map fbdev vaddr to pages\n");
+			sg_free_table(&fbdev_dmabuf->sg_table);
+			kfree(fbdev_dmabuf);
+			module_put(THIS_MODULE);
+			return ERR_PTR(-EFAULT);
+		}
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ offset += PAGE_SIZE;
+
+ /* Shadow what ion is doing currently to ensure sg_dma_address()
+ * is valid. This is not strictly correct as the dma address
+ * should only be valid after mapping (ownership changed), and
+ * we haven't mapped the scatter list yet.
+ */
+ sg_dma_address(sg) = sg_phys(sg);
+ }
+
+ fbdev_dmabuf->alloc_mask = &interface->alloc_mask;
+ fbdev_dmabuf->alloc_lock = &interface->alloc_lock;
+ fbdev_dmabuf->id = id;
+
+ return fbdev_dmabuf;
+}
+
+static void adf_fbdev_free_buffer(struct adf_fbdev_dmabuf *fbdev_dmabuf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(fbdev_dmabuf->alloc_lock, flags);
+ (*fbdev_dmabuf->alloc_mask) &= ~(1UL << fbdev_dmabuf->id);
+ spin_unlock_irqrestore(fbdev_dmabuf->alloc_lock, flags);
+
+ sg_free_table(&fbdev_dmabuf->sg_table);
+ kfree(fbdev_dmabuf);
+
+ module_put(THIS_MODULE);
+}
+
+/* DMA BUF LAYER *************************************************************/
+
+static struct sg_table *
+adf_fbdev_d_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf = attachment->dmabuf->priv;
+
+ return &fbdev_dmabuf->sg_table;
+}
+
+static void adf_fbdev_d_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ /* No-op */
+}
+
+static int adf_fbdev_d_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ unsigned long addr = vma->vm_start;
+ unsigned long remainder, len;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 i;
+
+ for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
+ fbdev_dmabuf->sg_table.nents, i) {
+ page = sg_page(sg);
+ if (!page) {
+ pr_err("Failed to retrieve pages\n");
+ return -EFAULT;
+ }
+ remainder = vma->vm_end - addr;
+ len = sg_dma_len(sg);
+ if (offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg_dma_len(sg) - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+
+ return 0;
+}
+
+static void adf_fbdev_d_release(struct dma_buf *dmabuf)
+{
+ adf_fbdev_free_buffer(dmabuf->priv);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+ !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+
+static int
+adf_fbdev_d_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+ enum dma_data_direction dir)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
+
+ if (start + len > fbdev_dmabuf->length)
+ return -EINVAL;
+ return 0;
+}
+
+static void adf_fbdev_d_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len, enum dma_data_direction dir)
+{
+ /* Framebuffer memory is cache coherent. No-op. */
+}
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) &&
+ !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318) */
+
+static void *
+adf_fbdev_d_kmap(struct dma_buf *dmabuf, unsigned long page_offset)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
+ void *vaddr;
+
+ if (page_offset * PAGE_SIZE >= fbdev_dmabuf->length)
+ return ERR_PTR(-EINVAL);
+ vaddr = fbdev_dmabuf->vaddr + page_offset * PAGE_SIZE;
+ return vaddr;
+}
+
+static void
+adf_fbdev_d_kunmap(struct dma_buf *dmabuf, unsigned long page_offset,
+ void *ptr)
+{
+ /* No-op */
+}
+
+static void *adf_fbdev_d_vmap(struct dma_buf *dmabuf)
+{
+ struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
+
+ return fbdev_dmabuf->vaddr;
+}
+
+static void adf_fbdev_d_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ /* No-op */
+}
+
+static const struct dma_buf_ops adf_fbdev_dma_buf_ops = {
+ .map_dma_buf = adf_fbdev_d_map_dma_buf,
+ .unmap_dma_buf = adf_fbdev_d_unmap_dma_buf,
+ .mmap = adf_fbdev_d_mmap,
+ .release = adf_fbdev_d_release,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+ !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+ .begin_cpu_access = adf_fbdev_d_begin_cpu_access,
+ .end_cpu_access = adf_fbdev_d_end_cpu_access,
+#endif
+ .kmap_atomic = adf_fbdev_d_kmap,
+ .kunmap_atomic = adf_fbdev_d_kunmap,
+ .kmap = adf_fbdev_d_kmap,
+ .kunmap = adf_fbdev_d_kunmap,
+ .vmap = adf_fbdev_d_vmap,
+ .vunmap = adf_fbdev_d_vunmap,
+};
+
+/* ADF LAYER *****************************************************************/
+
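+/* The single DRM fourcc advertised by the overlay engine; detected from the
+ * wrapped fbdev's var at module init.
+ */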
+static u32 adf_fbdev_supported_format;
+
+static int adf_fbdev_validate(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state)
+{
+ int err = adf_img_validate_simple(dev, cfg, driver_state);
+
+ if (cfg->n_bufs == 0 || err != 0)
+ return err;
+
+ /* Everything checked out in the generic validation, but we
+ * additionally want to check that the dmabuf came from the
+ * adf_fbdev module, which the generic code can't check.
+ */
+ if (cfg->bufs[0].dma_bufs[0]->ops != &adf_fbdev_dma_buf_ops)
+ return -EINVAL;
+
+ return 0;
+}
+
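+/* Present a validated configuration by panning the fbdev to the posted
+ * buffer's slot in the carveout (yoffset = slot id * yres).
+ */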
+static void adf_fbdev_post(struct adf_device *dev, struct adf_post *cfg,
+ void *driver_state)
+{
+ struct adf_fbdev_device *device = (struct adf_fbdev_device *)dev;
+ struct fb_var_screeninfo new_var = device->fb_info->var;
+ struct adf_fbdev_dmabuf *fbdev_dmabuf;
+ struct adf_buffer *buffer;
+ int err;
+
+ /* "Null" flip handling */
+ if (cfg->n_bufs == 0)
+ return;
+
+ if (!lock_fb_info(device->fb_info)) {
+ pr_err("Failed to lock fb_info structure.\n");
+ return;
+ }
+
+ console_lock();
+
+ buffer = &cfg->bufs[0];
+ fbdev_dmabuf = buffer->dma_bufs[0]->priv;
+ new_var.yoffset = new_var.yres * fbdev_dmabuf->id;
+
+ /* If we're supposed to be able to flip, but the yres_virtual has been
+ * changed to an unsupported (smaller) value, we need to change it back
+ * (this is a workaround for some Linux fbdev drivers that seem to lose
+ * any modifications to yres_virtual after a blank.)
+ */
+ if (new_var.yres_virtual < new_var.yres * NUM_PREFERRED_BUFFERS) {
+ new_var.activate = FB_ACTIVATE_NOW;
+ new_var.yres_virtual = new_var.yres * NUM_PREFERRED_BUFFERS;
+
+ err = fb_set_var(device->fb_info, &new_var);
+ if (err)
+ pr_err("fb_set_var failed (err=%d)\n", err);
+ } else {
+ err = fb_pan_display(device->fb_info, &new_var);
+ if (err)
+ pr_err("fb_pan_display failed (err=%d)\n", err);
+ }
+
+ console_unlock();
+
+ unlock_fb_info(device->fb_info);
+}
+
+static int
+adf_fbdev_open2(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_fbdev_device *dev =
+ (struct adf_fbdev_device *)obj->parent;
+ atomic_inc(&dev->refcount);
+ return 0;
+}
+
+static void
+adf_fbdev_release2(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_fbdev_device *dev =
+ (struct adf_fbdev_device *)obj->parent;
+ struct sync_fence *release_fence;
+
+ if (atomic_dec_return(&dev->refcount))
+ return;
+
+ /* This special "null" flip works around a problem with ADF
+ * which leaves buffers pinned by the display engine even
+ * after all ADF clients have closed.
+ *
+ * The "null" flip is pipelined like any other. The user won't
+ * be able to unload this module until it has been posted.
+ */
+ release_fence = adf_device_post(&dev->base, NULL, 0, NULL, 0, NULL, 0);
+ if (IS_ERR_OR_NULL(release_fence)) {
+ pr_err("Failed to queue null flip command (err=%d).\n",
+ (int)PTR_ERR(release_fence));
+ return;
+ }
+
+ sync_fence_put(release_fence);
+}
+
+static const struct adf_device_ops adf_fbdev_device_ops = {
+ .owner = THIS_MODULE,
+ .base = {
+ .open = adf_fbdev_open2,
+ .release = adf_fbdev_release2,
+ .ioctl = adf_img_ioctl,
+ },
+ .validate = adf_fbdev_validate,
+ .post = adf_fbdev_post,
+};
+
+static bool
+adf_fbdev_supports_event(struct adf_obj *obj, enum adf_event_type type)
+{
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ case ADF_EVENT_HOTPLUG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+adf_fbdev_set_event(struct adf_obj *obj, enum adf_event_type type,
+ bool enabled)
+{
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ case ADF_EVENT_HOTPLUG:
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int adf_fbdev_blank2(struct adf_interface *intf, u8 state)
+{
+ struct adf_fbdev_interface *interface =
+ (struct adf_fbdev_interface *)intf;
+ struct fb_info *fb_info = interface->fb_info;
+
+ if (!fb_info->fbops->fb_blank)
+ return -EOPNOTSUPP;
+
+ return fb_info->fbops->fb_blank(state, fb_info);
+}
+
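+/* ADF simple-buffer hook: export one of the carveout slots as a dma-buf.
+ * Only full-screen allocations in the single detected format are accepted.
+ */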
+static int
+adf_fbdev_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
+ u32 format, struct dma_buf **dma_buf,
+ u32 *offset, u32 *pitch)
+{
+ struct adf_fbdev_interface *interface =
+ (struct adf_fbdev_interface *)intf;
+ struct fb_var_screeninfo *var = &interface->fb_info->var;
+ struct adf_fbdev_dmabuf *fbdev_dmabuf;
+
+ if (w != var->xres) {
+ pr_err("Simple alloc request w=%u does not match w=%u.\n",
+ w, var->xres);
+ return -EINVAL;
+ }
+
+ if (h != var->yres) {
+ pr_err("Simple alloc request h=%u does not match h=%u.\n",
+ h, var->yres);
+ return -EINVAL;
+ }
+
+ if (format != adf_fbdev_supported_format) {
+ pr_err("Simple alloc request f=0x%x does not match f=0x%x.\n",
+ format, adf_fbdev_supported_format);
+ return -EINVAL;
+ }
+
+ fbdev_dmabuf = adf_fbdev_alloc_buffer(interface);
+ if (IS_ERR_OR_NULL(fbdev_dmabuf))
+ return PTR_ERR(fbdev_dmabuf);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ {
+ DEFINE_DMA_BUF_EXPORT_INFO(export_info);
+
+ export_info.ops = &adf_fbdev_dma_buf_ops;
+ export_info.size = fbdev_dmabuf->length;
+ export_info.flags = O_RDWR;
+ export_info.priv = fbdev_dmabuf;
+
+ *dma_buf = dma_buf_export(&export_info);
+ }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+ *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
+ fbdev_dmabuf->length, O_RDWR, NULL);
+#else
+ *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
+ fbdev_dmabuf->length, O_RDWR);
+#endif
+ if (IS_ERR(*dma_buf)) {
+ adf_fbdev_free_buffer(fbdev_dmabuf);
+ return PTR_ERR(*dma_buf);
+ }
+
+ *pitch = interface->fb_info->fix.line_length;
+ *offset = 0;
+ return 0;
+}
+
+static int
+adf_fbdev_screen_size(struct adf_interface *intf, u16 *width_mm,
+ u16 *height_mm)
+{
+ struct adf_fbdev_interface *interface =
+ (struct adf_fbdev_interface *)intf;
+ *width_mm = interface->width_mm;
+ *height_mm = interface->height_mm;
+ return 0;
+}
+
+static int adf_fbdev_modeset(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ struct adf_fbdev_interface *interface =
+ (struct adf_fbdev_interface *)intf;
+ return mode == &interface->fb_mode ? 0 : -EINVAL;
+}
+
+static const struct adf_interface_ops adf_fbdev_interface_ops = {
+ .base = {
+ .supports_event = adf_fbdev_supports_event,
+ .set_event = adf_fbdev_set_event,
+ },
+ .blank = adf_fbdev_blank2,
+ .alloc_simple_buffer = adf_fbdev_alloc_simple_buffer,
+ .screen_size = adf_fbdev_screen_size,
+ .modeset = adf_fbdev_modeset,
+};
+
+struct adf_overlay_engine_ops adf_fbdev_overlay_engine_ops = {
+ .supported_formats = &adf_fbdev_supported_format,
+ .n_supported_formats = 1,
+};
+
+/* If we can flip, we need to make sure we have the memory to do so.
+ *
+ * We'll assume that the fbdev device provides extra space in
+ * yres_virtual for panning; xres_virtual is theoretically supported,
+ * but it involves more work.
+ *
+ * If the fbdev device doesn't have yres_virtual > yres, we'll try
+ * requesting it before bailing. Userspace applications commonly do
+ * this with an FBIOPUT_VSCREENINFO ioctl().
+ *
+ * Another problem is with a limitation in PowerVR services -- it
+ * needs framebuffers to be page aligned (this is a SW limitation,
+ * the HW can support non-page-aligned buffers). So we have to
+ * check that stride * height for a single buffer is page aligned.
+ */
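+/* For example (illustrative numbers), a 1920x1080 32bpp fbdev has a
+ * line_length of 7680 bytes, so each buffer is 7680 * 1080 = 8294400 bytes
+ * (already a multiple of PAGE_SIZE for 4K pages) and the carveout must hold
+ * NUM_PREFERRED_BUFFERS (3 by default) of them, just under 24 MiB in total.
+ */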
+static bool adf_fbdev_flip_possible(struct fb_info *fb_info)
+{
+ struct fb_var_screeninfo var = fb_info->var;
+ int err;
+
+ if (!fb_info->fix.xpanstep && !fb_info->fix.ypanstep &&
+ !fb_info->fix.ywrapstep) {
+ pr_err("The fbdev device detected does not support ypan/ywrap.\n");
+ return false;
+ }
+
+ if ((fb_info->fix.line_length * var.yres) % PAGE_SIZE != 0) {
+ pr_err("Line length (in bytes) x yres is not a multiple of page size.\n");
+ return false;
+ }
+
+ /* We might already have enough space */
+ if (var.yres * NUM_PREFERRED_BUFFERS <= var.yres_virtual)
+ return true;
+
+ pr_err("No buffer space for flipping; asking for more.\n");
+
+ var.activate = FB_ACTIVATE_NOW;
+ var.yres_virtual = var.yres * NUM_PREFERRED_BUFFERS;
+
+ err = fb_set_var(fb_info, &var);
+ if (err) {
+ pr_err("fb_set_var failed (err=%d).\n", err);
+ return false;
+ }
+
+ if (var.yres * NUM_PREFERRED_BUFFERS > var.yres_virtual) {
+ pr_err("Failed to obtain additional buffer space.\n");
+ return false;
+ }
+
+ /* Some fbdev drivers allow the yres_virtual modification through,
+ * but don't actually update the fix. We need the fix to be updated
+ * and more memory allocated, so we can actually take advantage of
+ * the increased yres_virtual.
+ */
+ if (fb_info->fix.smem_len < fb_info->fix.line_length *
+ var.yres_virtual) {
+ pr_err("'fix' not re-allocated with sufficient buffer space.\n");
+ pr_err("Check NUM_PREFERRED_BUFFERS (%u) is as intended.\n",
+ NUM_PREFERRED_BUFFERS);
+ return false;
+ }
+
+ return true;
+}
+
+/* Could use devres here? */
+static struct {
+ struct adf_fbdev_device device;
+ struct adf_fbdev_interface interface;
+ struct adf_overlay_engine engine;
+} dev_data;
+
+static int __init init_adf_fbdev(void)
+{
+ struct drm_mode_modeinfo *mode = &dev_data.interface.fb_mode;
+ char format_str[ADF_FORMAT_STR_SIZE];
+ struct fb_info *fb_info;
+ int err = -ENODEV;
+
+ fb_info = registered_fb[0];
+ if (!fb_info) {
+ pr_err("No Linux framebuffer (fbdev) device is registered!\n");
+ pr_err("Check you have a framebuffer driver compiled into your kernel\n");
+ pr_err("and that it is enabled on the cmdline.\n");
+ goto err_out;
+ }
+
+ if (!lock_fb_info(fb_info))
+ goto err_out;
+
+ console_lock();
+
+ /* Filter out broken FB devices */
+ if (!fb_info->fix.smem_len || !fb_info->fix.line_length) {
+ pr_err("The fbdev device detected had a zero smem_len or line_length,\n");
+ pr_err("which suggests it is a broken driver.\n");
+ goto err_unlock;
+ }
+
+ if (fb_info->fix.type != FB_TYPE_PACKED_PIXELS ||
+ fb_info->fix.visual != FB_VISUAL_TRUECOLOR) {
+ pr_err("The fbdev device detected is not truecolor with packed pixels.\n");
+ goto err_unlock;
+ }
+
+ if (fb_info->var.bits_per_pixel == 32) {
+		if (fb_info->var.red.length == 8 &&
+		    fb_info->var.green.length == 8 &&
+		    fb_info->var.blue.length == 8 &&
+		    fb_info->var.red.offset == 16 &&
+		    fb_info->var.green.offset == 8 &&
+		    fb_info->var.blue.offset == 0) {
+#if defined(ADF_FBDEV_FORCE_XRGB8888)
+ adf_fbdev_supported_format = DRM_FORMAT_BGRX8888;
+#else
+ adf_fbdev_supported_format = DRM_FORMAT_BGRA8888;
+#endif
+		} else if (fb_info->var.red.length == 8 &&
+			   fb_info->var.green.length == 8 &&
+			   fb_info->var.blue.length == 8 &&
+			   fb_info->var.red.offset == 0 &&
+			   fb_info->var.green.offset == 8 &&
+			   fb_info->var.blue.offset == 16) {
+ adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
+ } else {
+ pr_err("The fbdev device detected uses an unrecognized 32bit pixel format (%u/%u/%u, %u/%u/%u)\n",
+ fb_info->var.red.length,
+ fb_info->var.green.length,
+ fb_info->var.blue.length,
+ fb_info->var.red.offset,
+ fb_info->var.green.offset,
+ fb_info->var.blue.offset);
+ goto err_unlock;
+ }
+ } else if (fb_info->var.bits_per_pixel == 16) {
+ if (fb_info->var.red.length != 5 ||
+ fb_info->var.green.length != 6 ||
+ fb_info->var.blue.length != 5 ||
+ fb_info->var.red.offset != 11 ||
+ fb_info->var.green.offset != 5 ||
+ fb_info->var.blue.offset != 0) {
+ pr_err("The fbdev device detected uses an unrecognized 16bit pixel format (%u/%u/%u, %u/%u/%u)\n",
+ fb_info->var.red.length,
+ fb_info->var.green.length,
+ fb_info->var.blue.length,
+ fb_info->var.red.offset,
+ fb_info->var.green.offset,
+ fb_info->var.blue.offset);
+ goto err_unlock;
+ }
+ adf_fbdev_supported_format = DRM_FORMAT_BGR565;
+ } else {
+ pr_err("The fbdev device detected uses an unsupported bpp (%u).\n",
+ fb_info->var.bits_per_pixel);
+ goto err_unlock;
+ }
+
+#if defined(CONFIG_ARCH_MT8173)
+ /* Workaround for broken framebuffer driver. The wrong pixel format
+ * is reported to this module. It is always really RGBA8888.
+ */
+ adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
+#endif
+
+ if (!try_module_get(fb_info->fbops->owner)) {
+ pr_err("try_module_get() failed");
+ goto err_unlock;
+ }
+
+ if (fb_info->fbops->fb_open &&
+ fb_info->fbops->fb_open(fb_info, 0) != 0) {
+ pr_err("fb_open() failed");
+ goto err_module_put;
+ }
+
+ if (!adf_fbdev_flip_possible(fb_info)) {
+ pr_err("Flipping must be supported for ADF. Aborting.\n");
+ goto err_fb_release;
+ }
+
+ err = adf_device_init(&dev_data.device.base, fb_info->dev,
+ &adf_fbdev_device_ops, "fbdev");
+ if (err) {
+ pr_err("adf_device_init failed (%d)", err);
+ goto err_fb_release;
+ }
+
+ dev_data.device.fb_info = fb_info;
+
+ err = adf_interface_init(&dev_data.interface.base,
+ &dev_data.device.base,
+ ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY,
+ &adf_fbdev_interface_ops, "fbdev_interface");
+ if (err) {
+ pr_err("adf_interface_init failed (%d)", err);
+ goto err_device_destroy;
+ }
+
+ spin_lock_init(&dev_data.interface.alloc_lock);
+ dev_data.interface.fb_info = fb_info;
+
+ /* If the fbdev mode looks viable, try to inherit from it */
+ if (fb_info->mode)
+ adf_modeinfo_from_fb_videomode(fb_info->mode, mode);
+
+ /* Framebuffer drivers aren't always very good at filling out their
+ * mode information, so fake up anything that's missing so we don't
+ * need to accommodate it in userspace.
+ */
+
+ if (!mode->hdisplay)
+ mode->hdisplay = fb_info->var.xres;
+ if (!mode->vdisplay)
+ mode->vdisplay = fb_info->var.yres;
+ if (!mode->vrefresh)
+ mode->vrefresh = FALLBACK_REFRESH_RATE;
+
+ if (fb_info->var.width > 0 && fb_info->var.width < 1000) {
+ dev_data.interface.width_mm = fb_info->var.width;
+ } else {
+ dev_data.interface.width_mm = (fb_info->var.xres * 25400) /
+ (FALLBACK_DPI * 1000);
+ }
+
+ if (fb_info->var.height > 0 && fb_info->var.height < 1000) {
+ dev_data.interface.height_mm = fb_info->var.height;
+ } else {
+ dev_data.interface.height_mm = (fb_info->var.yres * 25400) /
+ (FALLBACK_DPI * 1000);
+ }
+
+ err = adf_hotplug_notify_connected(&dev_data.interface.base, mode, 1);
+ if (err) {
+ pr_err("adf_hotplug_notify_connected failed (%d)", err);
+ goto err_interface_destroy;
+ }
+
+ /* This doesn't really set the mode, it just updates current_mode */
+ err = adf_interface_set_mode(&dev_data.interface.base, mode);
+ if (err) {
+ pr_err("adf_interface_set_mode failed (%d)", err);
+ goto err_interface_destroy;
+ }
+
+ err = adf_overlay_engine_init(&dev_data.engine, &dev_data.device.base,
+ &adf_fbdev_overlay_engine_ops,
+ "fbdev_overlay_engine");
+ if (err) {
+ pr_err("adf_overlay_engine_init failed (%d)", err);
+ goto err_interface_destroy;
+ }
+
+ err = adf_attachment_allow(&dev_data.device.base,
+ &dev_data.engine,
+ &dev_data.interface.base);
+
+ if (err) {
+ pr_err("adf_attachment_allow failed (%d)", err);
+ goto err_overlay_engine_destroy;
+ }
+
+ adf_format_str(adf_fbdev_supported_format, format_str);
+ pr_info("Found usable fbdev device (%s):\n"
+ "range (physical) = 0x%lx-0x%lx\n"
+ "range (virtual) = %p-%p\n"
+ "size (bytes) = 0x%x\n"
+ "xres x yres = %ux%u\n"
+ "xres x yres (v) = %ux%u\n"
+ "physical (mm) = %ux%u\n"
+ "refresh (Hz) = %u\n"
+ "drm fourcc = %s (0x%x)\n",
+ fb_info->fix.id,
+ fb_info->fix.smem_start,
+ fb_info->fix.smem_start + fb_info->fix.smem_len,
+ fb_info->screen_base,
+ fb_info->screen_base + fb_info->screen_size,
+ fb_info->fix.smem_len,
+ mode->hdisplay, mode->vdisplay,
+ fb_info->var.xres_virtual, fb_info->var.yres_virtual,
+ dev_data.interface.width_mm, dev_data.interface.height_mm,
+ mode->vrefresh,
+ format_str, adf_fbdev_supported_format);
+ err = 0;
+err_unlock:
+ console_unlock();
+ unlock_fb_info(fb_info);
+err_out:
+ return err;
+err_overlay_engine_destroy:
+ adf_overlay_engine_destroy(&dev_data.engine);
+err_interface_destroy:
+ adf_interface_destroy(&dev_data.interface.base);
+err_device_destroy:
+ adf_device_destroy(&dev_data.device.base);
+err_fb_release:
+ if (fb_info->fbops->fb_release)
+ fb_info->fbops->fb_release(fb_info, 0);
+err_module_put:
+ module_put(fb_info->fbops->owner);
+ goto err_unlock;
+}
+
+static void __exit exit_adf_fbdev(void)
+{
+ struct fb_info *fb_info = dev_data.device.fb_info;
+
+ if (!lock_fb_info(fb_info)) {
+ pr_err("Failed to lock fb_info.\n");
+ return;
+ }
+
+ console_lock();
+
+ adf_overlay_engine_destroy(&dev_data.engine);
+ adf_interface_destroy(&dev_data.interface.base);
+ adf_device_destroy(&dev_data.device.base);
+
+ if (fb_info->fbops->fb_release)
+ fb_info->fbops->fb_release(fb_info, 0);
+
+ module_put(fb_info->fbops->owner);
+
+ console_unlock();
+ unlock_fb_info(fb_info);
+}
+
+module_init(init_adf_fbdev);
+module_exit(exit_adf_fbdev);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File adf_sunxi.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+#include <video/adf_fbdev.h>
+#endif
+
+#include PVR_ANDROID_ION_HEADER
+#include PVR_ANDROID_SYNC_HEADER
+
+#include <linux/sw_sync.h>
+
+#include "adf_common.h"
+#include "adf_sunxi.h"
+
+#include "pvrmodule.h"
+
+#define MAX_DISPLAYS 2
+/* This is the maximum number of overlays per display.
+ * Any validation of global (cross-display) limits must be done in either
+ * adf_sunxi_attach() or adf_sunxi_validate().
+ */
+#define NUM_OVERLAYS 4
+#define NUM_BLENDER_PIPES 2
+#define MAX_BUFFERS (MAX_DISPLAYS * NUM_OVERLAYS)
+
+#define DEBUG_POST_DUMP_COUNT 4
+#define VALIDATE_LOG_LINES 50
+#define VALIDATE_LOG_LINE_SIZE 50
+
+#ifdef ADF_VERBOSE_DEBUG
+#define sunxi_dbg(x...) dev_dbg(x)
+#else
+#define sunxi_dbg(...)
+#endif
+
+struct sunxi_interface {
+ struct adf_interface interface;
+ enum adf_interface_type adf_type;
+ const char *name;
+
+ int num_supported_modes;
+ struct drm_mode_modeinfo *supported_modes;
+
+ bool connected;
+
+ int display_id;
+ disp_output_type disp_type;
+};
+
+struct sunxi_overlay {
+ struct adf_overlay_engine overlay;
+	/* NULL when not attached, else points into sunxi.interfaces */
+ struct sunxi_interface *interface;
+};
+
+struct {
+ struct adf_device device;
+ struct device *dev;
+ struct sunxi_interface interfaces[MAX_DISPLAYS];
+ struct sunxi_overlay overlays[MAX_DISPLAYS][NUM_OVERLAYS];
+ struct ion_client *ion_client;
+ u32 ion_heap_id;
+ atomic_t refcount;
+
+ struct disp_composer_ops disp_ops;
+
+ /* Used to dump the last config to debugfs file */
+ struct setup_dispc_data last_config[DEBUG_POST_DUMP_COUNT];
+ u32 last_config_id[DEBUG_POST_DUMP_COUNT];
+ int last_config_pos;
+
+ char validate_log[VALIDATE_LOG_LINES][VALIDATE_LOG_LINE_SIZE];
+ int validate_log_position;
+
+ struct dentry *debugfs_config_file;
+ struct dentry *debugfs_val_log;
+
+ atomic_t postcount;
+ atomic_t callbackcount;
+
+ wait_queue_head_t post_wait_queue;
+
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+ struct adf_fbdev fbdev;
+#endif
+} sunxi;
+
+static const u32 sunxi_supported_formats[] = {
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YVU420,
+};
+#define NUM_SUPPORTED_FORMATS ARRAY_SIZE(sunxi_supported_formats)
+
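+/* Append one line to the circular validation log. Entries are prefixed with
+ * the post id and can be read back through the adf_val_log debugfs file.
+ */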
+static void val_log(u32 post_id, const char *fmt, ...)
+{
+ va_list args;
+ char *str;
+ int offset;
+
+ sunxi.validate_log_position = (sunxi.validate_log_position + 1)
+ % VALIDATE_LOG_LINES;
+
+ str = sunxi.validate_log[sunxi.validate_log_position];
+
+ offset = snprintf(str, VALIDATE_LOG_LINE_SIZE, "id %u:",
+ post_id);
+
+ va_start(args, fmt);
+ vsnprintf(str+offset, VALIDATE_LOG_LINE_SIZE-offset, fmt, args);
+ va_end(args);
+}
+
+static bool
+is_supported_format(u32 drm_format)
+{
+ int i;
+
+ for (i = 0; i < NUM_SUPPORTED_FORMATS; i++) {
+ if (sunxi_supported_formats[i] == drm_format)
+ return true;
+ }
+ return false;
+}
+
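+/* Map a DRM fourcc format to the matching sunxi disp pixel format.
+ * Unsupported formats should have been rejected during validate, so hitting
+ * the default case is treated as a driver bug.
+ */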
+static disp_pixel_format
+sunxi_format_to_disp(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_BGRA8888:
+ return DISP_FORMAT_ARGB_8888;
+ case DRM_FORMAT_ARGB8888:
+ return DISP_FORMAT_BGRA_8888;
+ case DRM_FORMAT_BGRX8888:
+ return DISP_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XRGB8888:
+ return DISP_FORMAT_BGRX_8888;
+ case DRM_FORMAT_YVU420:
+ return DISP_FORMAT_YUV420_P;
+ default:
+ BUG();
+ return -1;
+ }
+}
+
+static bool
+sunxi_format_has_alpha(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_YVU420:
+ return false;
+ default:
+ BUG();
+ return false;
+ }
+}
+
+static u32
+sunxi_format_bpp(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return 4;
+ case DRM_FORMAT_YVU420:
+ return 1;
+ default:
+ BUG();
+ return 0;
+ }
+}
+
+static bool
+sunxi_format_uv_is_swapped(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return false;
+ case DRM_FORMAT_YVU420:
+ return true;
+ default:
+ BUG();
+ return false;
+ }
+}
+
+static bool
+buffer_is_scaled(const struct adf_buffer_config_ext *ext_config_data)
+{
+	int src_width = ext_config_data->crop.x2 - ext_config_data->crop.x1;
+	int src_height = ext_config_data->crop.y2 - ext_config_data->crop.y1;
+	int dst_width = ext_config_data->display.x2
+		- ext_config_data->display.x1;
+	int dst_height = ext_config_data->display.y2
+		- ext_config_data->display.y1;
+
+	return src_width != dst_width || src_height != dst_height;
+}
+
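+/* Translate an ADF buffer plus its extended config into the disp_layer_info
+ * expected by the sunxi disp driver. Pipe and zorder are filled in by the
+ * caller; physical addresses are only set when mappings are available.
+ */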
+static int
+sunxi_buffer_to_layer_info(const struct adf_buffer *buf,
+ const struct adf_buffer_config_ext *ext_config_data,
+ const struct adf_buffer_mapping *mappings, disp_layer_info *layer)
+{
+ int plane;
+
+ if (buffer_is_scaled(ext_config_data))
+ layer->mode = DISP_LAYER_WORK_MODE_SCALER;
+ else
+ layer->mode = DISP_LAYER_WORK_MODE_NORMAL;
+
+ /* Pipe/z are set in the parent function */
+ layer->pipe = 0;
+ layer->zorder = 0;
+ /* 0 = per-pixel alpha, 1 = global alpha */
+ switch (ext_config_data->blend_type) {
+ case ADF_BUFFER_BLENDING_NONE_EXT:
+ layer->alpha_mode = 1;
+ layer->alpha_value = 255;
+ break;
+ case ADF_BUFFER_BLENDING_PREMULT_EXT:
+ if (sunxi_format_has_alpha(buf->format))
+ layer->alpha_mode = 0;
+ else
+ layer->alpha_mode = 1;
+ layer->alpha_value = ext_config_data->plane_alpha;
+ layer->fb.pre_multiply = true;
+ break;
+ case ADF_BUFFER_BLENDING_COVERAGE_EXT:
+ dev_err(sunxi.dev, "Coverage blending not implemented\n");
+ return -1;
+ default:
+ dev_err(sunxi.dev, "Unknown blending type %d\n",
+ ext_config_data->blend_type);
+		return -1;
+	}
+ layer->ck_enable = false;
+ layer->screen_win.x = ext_config_data->display.x1;
+ layer->screen_win.y = ext_config_data->display.y1;
+ layer->screen_win.width = ext_config_data->display.x2 -
+ ext_config_data->display.x1;
+ layer->screen_win.height = ext_config_data->display.y2 -
+ ext_config_data->display.y1;
+
+ if (mappings) {
+ for (plane = 0; plane < buf->n_planes; plane++) {
+ layer->fb.addr[plane] =
+ sg_phys(mappings->sg_tables[plane]->sgl) +
+ buf->offset[plane];
+ }
+
+ /* Fix up planar formats with VU plane ordering. For some
+ * reason this is not properly handled by the sunxi disp
+ * driver for sun9i.
+ */
+ if (sunxi_format_uv_is_swapped(buf->format)) {
+ unsigned int tmp = layer->fb.addr[1];
+
+ layer->fb.addr[1] = layer->fb.addr[2];
+ layer->fb.addr[2] = tmp;
+ }
+ }
+
+ layer->fb.size.width = buf->pitch[0] / sunxi_format_bpp(buf->format);
+ layer->fb.size.height = buf->h;
+ layer->fb.format = sunxi_format_to_disp(buf->format);
+ layer->fb.src_win.x = ext_config_data->crop.x1;
+ layer->fb.src_win.y = ext_config_data->crop.y1;
+ /* fb.src_win.width/height is only used for scaled layers */
+ layer->fb.src_win.width = ext_config_data->crop.x2 -
+ ext_config_data->crop.x1;
+ layer->fb.src_win.height = ext_config_data->crop.y2 -
+ ext_config_data->crop.y1;
+
+ return 0;
+}
+
+static int
+adf_sunxi_open(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ atomic_inc(&sunxi.refcount);
+ return 0;
+}
+
+static void adf_sunxi_set_hotplug_state(struct sunxi_interface *intf,
+ bool enable);
+
+static void
+adf_sunxi_release(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct sync_fence *release_fence;
+ int dpy;
+
+ if (atomic_dec_return(&sunxi.refcount))
+ return;
+
+ /* NULL flip to push buffer off screen */
+ release_fence = adf_device_post(obj->parent, NULL, 0, NULL, 0, NULL, 0);
+
+ if (IS_ERR_OR_NULL(release_fence)) {
+ dev_err(obj->parent->dev, "Failed to queue null flip command (err=%d)\n",
+ (int)PTR_ERR(release_fence));
+ return;
+ }
+
+ /* Disable any hotplug events */
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_HDMI)
+ adf_sunxi_set_hotplug_state(&sunxi.interfaces[dpy],
+ false);
+ }
+
+ sync_fence_put(release_fence);
+}
+
+struct pipe_assignments {
+ int pipe[MAX_BUFFERS];
+};
+
+static void adf_sunxi_state_free(struct adf_device *dev, void *driver_state)
+{
+	struct pipe_assignments *pipe_assignments = driver_state;
+
+ kfree(pipe_assignments);
+}
+
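+/* Return the display index that owns the overlay engine assigned to this
+ * buffer, or -1 if the overlay does not belong to this driver.
+ */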
+static int get_buf_display(struct adf_buffer *buf)
+{
+ int dpy, ovl;
+
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ for (ovl = 0; ovl < NUM_OVERLAYS; ovl++) {
+ if (&sunxi.overlays[dpy][ovl].overlay ==
+ buf->overlay_engine) {
+ goto found_ovl;
+ }
+ }
+ }
+ return -1;
+found_ovl:
+ return dpy;
+}
+
+struct pipe_assignment_state {
+ int current_pipe[MAX_DISPLAYS];
+ int max_pipe[MAX_DISPLAYS];
+ int current_pipe_layers[MAX_DISPLAYS];
+
+ struct drm_clip_rect current_pipe_rects[MAX_DISPLAYS][NUM_OVERLAYS];
+};
+
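+/* Pick a blend pipe for a layer on the given display. Returns the pipe index
+ * to use, or -1 if the layer cannot be placed because all blend pipes on
+ * that display are exhausted.
+ */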
+static int
+assign_pipe(struct pipe_assignment_state *state, int dpy, bool blended,
+ struct drm_clip_rect *display_rect)
+{
+ struct drm_clip_rect *current_pipe_rects =
+ &state->current_pipe_rects[dpy][0];
+ int rect;
+
+ /* The sunxi display block appears to support a single blender
+ * taking multiple input rects, so long as the blended
+ * rects do not overlap
+ */
+ if (blended) {
+ for (rect = 0; rect < state->current_pipe_layers[dpy]; rect++) {
+ const struct drm_clip_rect *layer_rect = &
+ current_pipe_rects[rect];
+ if (!adf_img_rects_intersect(layer_rect,
+ display_rect)) {
+ continue;
+ }
+ /* We need to assign a new pipe */
+ state->current_pipe[dpy]++;
+ state->current_pipe_layers[dpy] = 0;
+ if (state->current_pipe[dpy] >=
+ state->max_pipe[dpy]) {
+ return -1;
+ }
+ }
+ }
+ current_pipe_rects[state->current_pipe_layers[dpy]] =
+ *display_rect;
+ state->current_pipe_layers[dpy]++;
+
+ return state->current_pipe[dpy];
+}
+
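+/* Validate a proposed post: check buffer sanity, formats, scaling and
+ * blending limits, and pre-compute the blend pipe assignment, which is
+ * passed to adf_sunxi_post() via driver_state.
+ */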
+static int adf_sunxi_validate(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state)
+{
+ int i, dpy, pipe;
+ struct adf_post_ext *post_ext = cfg->custom_data;
+ struct adf_buffer_config_ext *bufs_ext;
+ size_t expected_custom_data_size;
+
+ struct pipe_assignment_state pipe_state;
+ struct pipe_assignments *pipe_assignments;
+ bool scaler_in_use[MAX_DISPLAYS];
+ int err = 0;
+ u32 post_id;
+
+ bool is_post = cfg->n_bufs && cfg->bufs[0].acquire_fence != NULL;
+
+ if (cfg->n_bufs == 0) {
+ val_log(0, "NULL flip\n");
+ return 0;
+ }
+
+ if (!post_ext) {
+ dev_err(dev->dev, "Invalid custom data pointer\n");
+ return -EINVAL;
+ }
+ post_id = post_ext->post_id;
+
+ expected_custom_data_size = sizeof(struct adf_post_ext)
+ + cfg->n_bufs * sizeof(struct adf_buffer_config_ext);
+ if (cfg->custom_data_size != expected_custom_data_size) {
+		dev_err(dev->dev, "Invalid custom data size - expected %zu for %zu buffers, got %zu\n",
+			expected_custom_data_size, cfg->n_bufs,
+			cfg->custom_data_size);
+ return -EINVAL;
+ }
+
+ bufs_ext = &post_ext->bufs_ext[0];
+
+ /* Reset blend pipe state */
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ scaler_in_use[dpy] = false;
+ pipe_state.current_pipe[dpy] = 0;
+ pipe_state.current_pipe_layers[dpy] = 0;
+ }
+
+	/* NOTE: The correct method of assigning pipes across multiple displays
+	 * is not documented and needs experimentation to confirm. The current
+	 * assumption is that there are 2 blend sources (pipes 0 and 1) on the
+	 * internal display, and only 1 (pipe 0) on HDMI.
+	 */
+ if (sunxi.interfaces[DISPLAY_HDMI].connected) {
+ pipe_state.max_pipe[DISPLAY_HDMI] = 1;
+ pipe_state.current_pipe[DISPLAY_HDMI] = 0;
+ pipe_state.max_pipe[DISPLAY_INTERNAL] = NUM_BLENDER_PIPES;
+ pipe_state.current_pipe[DISPLAY_INTERNAL] = 1;
+ } else {
+ pipe_state.max_pipe[DISPLAY_INTERNAL] = NUM_BLENDER_PIPES;
+ pipe_state.current_pipe[DISPLAY_INTERNAL] = 0;
+ pipe_state.max_pipe[DISPLAY_HDMI] = 0;
+ pipe_state.current_pipe[DISPLAY_HDMI] = 0;
+ }
+
+ pipe_assignments =
+ kzalloc(sizeof(*pipe_assignments), GFP_KERNEL);
+ if (!pipe_assignments) {
+ dev_err(dev->dev, "Failed to allocate pipe assignment state\n");
+ err = -ENOMEM;
+ goto err_free_assignments;
+ }
+
+	if (cfg->n_bufs > MAX_BUFFERS) {
+		dev_err(dev->dev, "Trying to post %zu buffers (max %d)\n",
+			cfg->n_bufs, MAX_BUFFERS);
+		err = -EINVAL;
+		goto err_free_assignments;
+	}
+
+ for (i = 0; i < cfg->n_bufs; i++) {
+ bool buffer_is_sane;
+ struct adf_buffer *buf = &cfg->bufs[i];
+ struct adf_buffer_config_ext *ebuf = &bufs_ext[i];
+
+ dpy = get_buf_display(buf);
+ if (dpy < 0) {
+ dev_err(dev->dev, "Buffer %d has invalid assigned overlay\n",
+ i);
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+
+ buffer_is_sane =
+ adf_img_buffer_sanity_check(
+ &sunxi.interfaces[dpy].interface,
+ buf,
+ ebuf);
+
+ if (!buffer_is_sane) {
+ dev_err(dev->dev, "Buffer %d failed sanity check\n",
+ i);
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+
+ if (!is_supported_format(buf->format)) {
+ /* This should be cleanly rejected when trying to assign
+ * an overlay engine
+ */
+ dev_err(dev->dev, "Buffer %d has unrecognised format 0x%08x\n",
+ i, buf->format);
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+ if (buffer_is_scaled(ebuf)) {
+			/* The assumption is that only a single scaled layer
+			 * is allowed per display, otherwise there may be an
+			 * unbounded upper limit on the samples required per
+			 * frame when validating one layer at a time.
+			 */
+ if (scaler_in_use[dpy]) {
+ val_log(post_id, "Buffer %d is second scaled layer\n",
+ i);
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+ scaler_in_use[dpy] = true;
+ if (!sunxi.disp_ops.is_support_scaler_layer(dpy,
+ ebuf->crop.x2 - ebuf->crop.x1,
+ ebuf->crop.y2 - ebuf->crop.y1,
+ ebuf->display.x2 - ebuf->display.x1,
+ ebuf->display.y2 - ebuf->display.y1)) {
+ val_log(post_id, "Buffer %d unsupported scaled layer\n",
+ i);
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+ }
+ if (ebuf->transform != ADF_BUFFER_TRANSFORM_NONE_EXT) {
+ /* TODO: Sunxi transform support */
+			val_log(post_id, "Transformed layers are not currently supported\n");
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+
+ if (ebuf->blend_type != ADF_BUFFER_BLENDING_NONE_EXT &&
+ ebuf->plane_alpha != 255 &&
+ sunxi_format_has_alpha(buf->format)) {
+ /* The sunxi display block appears to only support
+ * pixel /or/ global (plane) alpha, not both
+ */
+ val_log(post_id, "Layer has both plane and pixel alpha\n");
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+
+ pipe = assign_pipe(&pipe_state, dpy,
+ ebuf->blend_type != ADF_BUFFER_BLENDING_NONE_EXT,
+ &ebuf->display);
+
+ if (pipe < 0) {
+ val_log(post_id, "Ran out of blend pipes\n");
+ err = -EINVAL;
+ goto err_free_assignments;
+ }
+ pipe_assignments->pipe[i] = pipe;
+ }
+ val_log(post_id, "Validate succeeded\n");
+
+ *driver_state = pipe_assignments;
+
+ return 0;
+err_free_assignments:
+ if (is_post)
+ dev_err(dev->dev, "Failed validate for post\n");
+ kfree(pipe_assignments);
+ return err;
+}
+
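+/* Called by the disp driver when a queued configuration has been retired;
+ * wakes any poster waiting in adf_sunxi_post().
+ */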
+static void sunxi_retire_callback(void)
+{
+ atomic_inc(&sunxi.callbackcount);
+ wake_up(&sunxi.post_wait_queue);
+}
+
+static bool sunxi_post_completed(u32 post_id)
+{
+ return (atomic_read(&sunxi.callbackcount) >= post_id);
+}
+
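+/* Flush a validated post to the hardware: build the setup_dispc_data from
+ * the ADF buffers, record it in the debugfs history, queue it to the disp
+ * driver and wait (with a timeout) for the retire callback.
+ */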
+static void adf_sunxi_post(struct adf_device *adf_dev, struct adf_post *cfg,
+ void *driver_state)
+{
+ struct setup_dispc_data *disp_data;
+ int err, buf;
+ struct adf_post_ext *post_ext = cfg->custom_data;
+ struct adf_buffer_config_ext *ext_config_data = NULL;
+ int num_buffers[MAX_DISPLAYS];
+ int dpy;
+ struct pipe_assignments *pipe_assignments;
+ u32 post_count, post_id;
+ /* Allow a timeout of 4 frames before we force the frame off-screen */
+ long timeout =
+ msecs_to_jiffies((1000 / 60) * 4);
+
+ if (cfg->n_bufs == 0) {
+ val_log(0, "NULL flip\n");
+ post_id = 0;
+ post_ext = NULL;
+ } else {
+ BUG_ON(post_ext == NULL);
+ post_id = post_ext->post_id;
+ ext_config_data = &post_ext->bufs_ext[0];
+ val_log(post_id, "Posting\n");
+ }
+
+ pipe_assignments = driver_state;
+ if (!pipe_assignments && cfg->n_bufs != 0) {
+ dev_err(adf_dev->dev, "Invalid driver state\n");
+ return;
+ }
+
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++)
+ num_buffers[dpy] = 0;
+
+ disp_data = kzalloc(sizeof(*disp_data), GFP_KERNEL);
+ if (!disp_data) {
+		dev_err(adf_dev->dev, "Failed to allocate post data\n");
+ return;
+ }
+
+ for (buf = 0; buf < cfg->n_bufs; buf++) {
+
+ dpy = get_buf_display(&cfg->bufs[buf]);
+ if (dpy < 0) {
+			dev_err(adf_dev->dev, "Invalid overlay %p assigned to layer %d\n",
+ cfg->bufs[buf].overlay_engine, buf);
+ goto err_free_data;
+ }
+
+ err = sunxi_buffer_to_layer_info(&cfg->bufs[buf],
+ &ext_config_data[buf],
+ &cfg->mappings[buf],
+ &disp_data->layer_info[dpy][num_buffers[dpy]]);
+
+ if (err) {
+ dev_err(adf_dev->dev, "Failed to setup layer info (%d)\n",
+ err);
+ goto err_free_data;
+ }
+ disp_data->layer_info[dpy][num_buffers[dpy]].pipe =
+ pipe_assignments->pipe[buf];
+ disp_data->layer_info[dpy][num_buffers[dpy]].zorder = buf;
+ num_buffers[dpy]++;
+ }
+
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ sunxi_dbg(adf_dev->dev, "Dpy %u has %u layers\n", dpy,
+ num_buffers[dpy]);
+ disp_data->layer_num[dpy] = num_buffers[dpy];
+ }
+
+ disp_data->hConfigData = disp_data;
+
+ sunxi.last_config_pos = (sunxi.last_config_pos + 1)
+ % DEBUG_POST_DUMP_COUNT;
+
+ sunxi.last_config[sunxi.last_config_pos] = *disp_data;
+ sunxi.last_config_id[sunxi.last_config_pos] = post_id;
+
+ err = sunxi.disp_ops.dispc_gralloc_queue(disp_data);
+ if (err)
+ dev_err(adf_dev->dev, "Failed to queue post (%d)\n", err);
+
+ post_count = atomic_add_return(1, &sunxi.postcount);
+
+ if (wait_event_timeout(sunxi.post_wait_queue,
+ sunxi_post_completed(post_count-1), timeout) == 0) {
+		dev_err(sunxi.dev, "Timeout waiting for post callback\n");
+	}
+
+err_free_data:
+	kfree(disp_data);
+}
+
+static bool adf_sunxi_supports_event(struct adf_obj *obj,
+ enum adf_event_type type)
+{
+ switch (obj->type) {
+ case ADF_OBJ_INTERFACE: {
+ struct adf_interface *intf =
+ container_of(obj, struct adf_interface, base);
+ struct sunxi_interface *sunxi_intf =
+ container_of(intf, struct sunxi_interface, interface);
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ return true;
+ case ADF_EVENT_HOTPLUG:
+ /* Only support hotplug on HDMI displays */
+ return (sunxi_intf->disp_type == DISP_OUTPUT_TYPE_HDMI);
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+ return false;
+}
+
+static struct
+{
+ u32 width, height, refresh;
+ disp_tv_mode mode;
+} hdmi_valid_modes[] = {
+ /* List of modes in preference order */
+ { 1920, 1080, 60, DISP_TV_MOD_1080P_60HZ},
+ { 1920, 1080, 50, DISP_TV_MOD_1080P_50HZ},
+ { 1280, 720, 60, DISP_TV_MOD_720P_60HZ},
+ { 1280, 720, 50, DISP_TV_MOD_720P_50HZ},
+ { 1920, 1080, 25, DISP_TV_MOD_1080P_25HZ},
+ { 1920, 1080, 30, DISP_TV_MOD_1080P_30HZ},
+ { 640, 480, 30, DISP_TV_MOD_480P},
+};
+#define NUM_HDMI_VALID_MODES \
+ ARRAY_SIZE(hdmi_valid_modes)
+
+static void setup_drm_mode(struct drm_mode_modeinfo *mode, int height,
+ int width, int refresh)
+{
+ memset(mode, 0, sizeof(*mode));
+
+ mode->vrefresh = refresh;
+ mode->hdisplay = width;
+ mode->vdisplay = height;
+
+ adf_modeinfo_set_name(mode);
+}
+
+static void sunxi_disp_vsync_callback(void *user_data, u32 screen_id)
+{
+ adf_vsync_notify(&sunxi.interfaces[screen_id].interface, ktime_get());
+}
+
+static int sunxi_disp_hotplug_callback(void *user_data,
+ disp_hotplug_state state)
+{
+ struct sunxi_interface *intf = user_data;
+ int ret;
+ int mode_count = 0;
+ unsigned int idx;
+
+ dev_dbg(sunxi.dev, "%s: called state = %u\n", __func__, state);
+
+ /* Only HDMI displays can be hotplugged */
+ BUG_ON(intf->disp_type != DISP_OUTPUT_TYPE_HDMI);
+
+ kfree(intf->supported_modes);
+ intf->supported_modes = NULL;
+ intf->num_supported_modes = 0;
+ switch (state) {
+ default:
+ dev_err(sunxi.dev, "%s: Invalid hotplug state\n", __func__);
+ /* Fall-thru, treat as disconnect */
+ case DISP_HOTPLUG_DISCONNECT:
+ intf->connected = false;
+ adf_hotplug_notify_disconnected(&intf->interface);
+ dev_dbg(sunxi.dev, "%s: set disconnected\n", __func__);
+ return 0;
+ case DISP_HOTPLUG_CONNECT:
+ intf->connected = true;
+ break;
+ }
+
+ for (idx = 0; idx < NUM_HDMI_VALID_MODES; idx++) {
+ ret = sunxi.disp_ops.hdmi_check_support_mode(intf->display_id,
+ hdmi_valid_modes[idx].mode);
+ if (ret == 1)
+ mode_count++;
+ }
+
+ intf->num_supported_modes = mode_count;
+ if (mode_count == 0) {
+		dev_warn(sunxi.dev, "%s: No supported modes found for display id %d - forcing default mode\n",
+ __func__, intf->display_id);
+ intf->num_supported_modes = 1;
+ intf->supported_modes = kzalloc(
+ sizeof(*intf->supported_modes), GFP_KERNEL);
+ if (!intf->supported_modes) {
+ dev_err(sunxi.dev, "%s: Failed to allocate mode list\n",
+ __func__);
+ goto err_out;
+ }
+ /* Force the first mode in the supported list */
+ setup_drm_mode(&intf->supported_modes[0],
+ hdmi_valid_modes[0].height, hdmi_valid_modes[0].width,
+ hdmi_valid_modes[0].refresh);
+ } else {
+ unsigned int supported_idx = 0;
+
+ intf->num_supported_modes = mode_count;
+ intf->supported_modes = kzalloc(
+ mode_count * sizeof(*intf->supported_modes),
+ GFP_KERNEL);
+ if (!intf->supported_modes) {
+ dev_err(sunxi.dev, "%s: Failed to allocate mode list\n",
+ __func__);
+ goto err_out;
+ }
+ for (idx = 0; idx < NUM_HDMI_VALID_MODES; idx++) {
+ if (sunxi.disp_ops.hdmi_check_support_mode(
+ intf->display_id,
+ hdmi_valid_modes[idx].mode) != 1) {
+ continue;
+ }
+ BUG_ON(supported_idx >= intf->num_supported_modes);
+ setup_drm_mode(&intf->supported_modes[supported_idx],
+ hdmi_valid_modes[idx].height,
+ hdmi_valid_modes[idx].width,
+ hdmi_valid_modes[idx].refresh);
+ supported_idx++;
+ }
+ BUG_ON(supported_idx != intf->num_supported_modes);
+ }
+ adf_hotplug_notify_connected(&intf->interface, intf->supported_modes,
+ intf->num_supported_modes);
+ /* Default to first mode */
+ ret = adf_interface_set_mode(&intf->interface,
+ &intf->supported_modes[0]);
+ if (ret) {
+ dev_err(sunxi.dev, "%s: Failed hotplug modeset (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ dev_dbg(sunxi.dev, "%s: set connect\n", __func__);
+ return 0;
+
+err_out:
+ intf->num_supported_modes = 0;
+ kfree(intf->supported_modes);
+ intf->supported_modes = NULL;
+ return -1;
+}
+
+static void adf_sunxi_set_hotplug_state(struct sunxi_interface *intf,
+ bool enabled)
+{
+ BUG_ON(intf->disp_type != DISP_OUTPUT_TYPE_HDMI);
+ dev_dbg(sunxi.dev, "%s: hotplug set to %s\n", __func__,
+ enabled ? "enabled" : "disabled");
+ if (enabled) {
+ sunxi.disp_ops.hotplug_enable(intf->display_id, true);
+ sunxi.disp_ops.hotplug_callback(intf->display_id, intf,
+ sunxi_disp_hotplug_callback);
+		sunxi.disp_ops.hdmi_enable(intf->display_id);
+	} else {
+ sunxi.disp_ops.hdmi_disable(intf->display_id);
+ sunxi.disp_ops.hotplug_enable(intf->display_id, false);
+ sunxi.disp_ops.hotplug_callback(intf->display_id, NULL, NULL);
+ }
+
+}
+
+static void adf_sunxi_set_event(struct adf_obj *obj, enum adf_event_type type,
+ bool enabled)
+{
+ switch (obj->type) {
+ case ADF_OBJ_INTERFACE: {
+ struct adf_interface *intf =
+ container_of(obj, struct adf_interface, base);
+ struct sunxi_interface *sunxi_intf =
+ container_of(intf, struct sunxi_interface, interface);
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ sunxi.disp_ops.vsync_enable(sunxi_intf->display_id,
+ enabled);
+ break;
+ case ADF_EVENT_HOTPLUG:
+ adf_sunxi_set_hotplug_state(sunxi_intf, enabled);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ }
+ default:
+ BUG();
+ }
+}
+
+
+static disp_tv_mode
+find_matching_disp_tv_mode_id(struct drm_mode_modeinfo *mode)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < NUM_HDMI_VALID_MODES; idx++) {
+ if (hdmi_valid_modes[idx].width == mode->hdisplay &&
+ hdmi_valid_modes[idx].height == mode->vdisplay &&
+ hdmi_valid_modes[idx].refresh == mode->vrefresh) {
+ return hdmi_valid_modes[idx].mode;
+ }
+ }
+ dev_err(sunxi.dev, "%s: No matching disp_tv_mode for %ux%u@%u\n",
+ __func__, mode->hdisplay, mode->vdisplay, mode->vrefresh);
+ return 0;
+}
+
+static int adf_sunxi_modeset(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ disp_tv_mode disp_mode;
+ int err;
+ struct sunxi_interface *sunxi_intf =
+ container_of(intf, struct sunxi_interface, interface);
+
+ dev_dbg(sunxi.dev, "%s: setting %d (type %d) to %ux%u@%u\n", __func__,
+ sunxi_intf->display_id, sunxi_intf->disp_type, mode->hdisplay,
+ mode->vdisplay, mode->vrefresh);
+
+ if (sunxi_intf->disp_type != DISP_OUTPUT_TYPE_HDMI) {
+ dev_dbg(sunxi.dev, "%s: Stub modeset for internal display\n",
+ __func__);
+ return 0;
+ }
+
+ disp_mode = find_matching_disp_tv_mode_id(mode);
+
+ dev_dbg(sunxi.dev, "%s: HDMI modeset to mode %d\n", __func__,
+ disp_mode);
+
+ err = sunxi.disp_ops.hdmi_disable(sunxi_intf->display_id);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to disable display id %d for modeset\n",
+ __func__, sunxi_intf->display_id);
+ return -EFAULT;
+ }
+
+ err = sunxi.disp_ops.hdmi_set_mode(sunxi_intf->display_id, disp_mode);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to set mode %ux%u@%u (id %d) to display id %d\n",
+ __func__, mode->hdisplay, mode->vdisplay,
+ mode->vrefresh, disp_mode, sunxi_intf->display_id);
+ return -EFAULT;
+ }
+
+ err = sunxi.disp_ops.hdmi_enable(sunxi_intf->display_id);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to enable display id %d after modeset\n",
+ __func__, sunxi_intf->display_id);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+
+static int adf_sunxi_alloc_simple_buffer(struct adf_interface *intf, u16 w,
+ u16 h, u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+ int err = 0;
+ u32 bpp = sunxi_format_bpp(format);
+ u32 size = h * w * bpp;
+ struct ion_handle *hdl;
+ struct adf_device *dev = intf->base.parent;
+
+ if (bpp == 0) {
+ dev_err(dev->dev, "%s: unknown format (0x%08x)\n",
+ __func__, format);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ hdl = ion_alloc(sunxi.ion_client, size, 0,
+ (1 << sunxi.ion_heap_id), 0);
+ if (IS_ERR(hdl)) {
+ err = PTR_ERR(hdl);
+ dev_err(dev->dev, "%s: ion_alloc failed (%d)\n",
+ __func__, err);
+ goto err_out;
+ }
+ *dma_buf = ion_share_dma_buf(sunxi.ion_client, hdl);
+	if (IS_ERR(*dma_buf)) {
+		err = PTR_ERR(*dma_buf);
+		dev_err(dev->dev, "%s: ion_share_dma_buf failed (%d)\n",
+			__func__, err);
+		goto err_free_buffer;
+	}
+ *pitch = w * bpp;
+ *offset = 0;
+err_free_buffer:
+ ion_free(sunxi.ion_client, hdl);
+err_out:
+ return err;
+}
+
+static int adf_sunxi_describe_simple_post(struct adf_interface *intf,
+ struct adf_buffer *fb, void *data, size_t *size)
+{
+ *size = 0;
+ return 0;
+}
+
+#endif /* SUPPORT_ADF_SUNXI_FBDEV */
+
+static struct adf_device_ops adf_sunxi_device_ops = {
+ .owner = THIS_MODULE,
+ .base = {
+ .open = adf_sunxi_open,
+ .release = adf_sunxi_release,
+ .ioctl = adf_img_ioctl,
+ },
+ .state_free = adf_sunxi_state_free,
+ .validate = adf_sunxi_validate,
+ .post = adf_sunxi_post,
+};
+
+static struct adf_interface_ops adf_sunxi_interface_ops = {
+ .base = {
+ .supports_event = adf_sunxi_supports_event,
+ .set_event = adf_sunxi_set_event,
+ },
+ .modeset = adf_sunxi_modeset,
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+ .alloc_simple_buffer = adf_sunxi_alloc_simple_buffer,
+ .describe_simple_post = adf_sunxi_describe_simple_post,
+#endif
+};
+
+static struct adf_overlay_engine_ops adf_sunxi_overlay_ops = {
+ .supported_formats = &sunxi_supported_formats[0],
+ .n_supported_formats = NUM_SUPPORTED_FORMATS,
+};
+
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+
+static struct fb_ops adf_sunxi_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = adf_fbdev_open,
+ .fb_release = adf_fbdev_release,
+ .fb_check_var = adf_fbdev_check_var,
+ .fb_set_par = adf_fbdev_set_par,
+ .fb_blank = adf_fbdev_blank,
+ .fb_pan_display = adf_fbdev_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = adf_fbdev_mmap,
+};
+#endif
+
+static void sunxi_debugfs_print_window(struct seq_file *s, const char *prefix,
+ const disp_window *win)
+{
+ if (win->x)
+ seq_printf(s, "%sx\t=\t%u\n", prefix, win->x);
+ if (win->y)
+ seq_printf(s, "%sy\t=\t%u\n", prefix, win->y);
+ seq_printf(s, "%sw\t=\t%u\n", prefix, win->width);
+ seq_printf(s, "%sh\t=\t%u\n", prefix, win->height);
+
+}
+
+static void sunxi_debugfs_print_fb_info(struct seq_file *s, const char *prefix,
+ const disp_fb_info *fb)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ if (fb->addr[i])
+ seq_printf(s, "%saddr[%d]\t=\t0x%08x\n", prefix, i,
+ fb->addr[i]);
+ seq_printf(s, "%ssize.w\t=\t%u\n", prefix, fb->size.width);
+ seq_printf(s, "%ssize.h\t=\t%u\n", prefix, fb->size.height);
+ seq_printf(s, "%sformat\t=\t0x%x\n", prefix, fb->format);
+ if (fb->cs_mode)
+ seq_printf(s, "%scs_mode\t=\t0x%x\n", prefix, fb->cs_mode);
+ if (fb->b_trd_src)
+ seq_printf(s, "%sb_trd_src\t=\t0x%x\n", prefix, fb->b_trd_src);
+ if (fb->trd_mode)
+ seq_printf(s, "%strd_mode\t=\t0x%x\n", prefix, fb->trd_mode);
+ for (i = 0; i < 3; i++)
+ if (fb->trd_right_addr[i])
+ seq_printf(s, "%strd_right_addr[%d]\t=\t0x%x\n", prefix,
+ i, fb->trd_right_addr[i]);
+	/* The default alpha mode is pre-multiply (0x1), so only values other
+	 * than 0x1 are interesting.
+	 */
+ if (fb->pre_multiply != 0x1)
+ seq_printf(s, "%spre_multiply\t=\t0x%x\n", prefix,
+ fb->pre_multiply);
+
+}
+
+static void sunxi_debugfs_print_layer_info(struct seq_file *s, int layer_num,
+ disp_layer_info *layer_info)
+{
+ int i;
+
+ for (i = 0; i < layer_num; i++) {
+ disp_layer_info *layer = &layer_info[i];
+
+ seq_printf(s, "\tlayer[%d] = {\n", i);
+ if (layer->mode)
+ seq_printf(s, "\t\tmode\t=\t0x%x\n", layer->mode);
+ seq_printf(s, "\t\tpipe\t=\t0x%x\n", layer->pipe);
+ seq_printf(s, "\t\tzorder\t=\t0x%x\n", layer->zorder);
+ if (layer->alpha_mode)
+ seq_printf(s, "\t\talpha_mode\t=\t0x%x\n",
+ layer->alpha_mode);
+		/* The default alpha is 0xff, so only values other than 0xff
+		 * are interesting.
+		 */
+ if (layer->alpha_value != 0xff)
+ seq_printf(s, "\t\talpha_value\t=\t0x%x\n",
+ layer->alpha_value);
+ if (layer->ck_enable)
+ seq_printf(s, "\t\tck_enable\t=\t0x%x\n",
+ layer->ck_enable);
+ sunxi_debugfs_print_window(s, "\t\tscreen_win.",
+ &layer->screen_win);
+ sunxi_debugfs_print_fb_info(s, "\t\tfb.", &layer->fb);
+ if (layer->b_trd_out)
+ seq_printf(s, "\t\tb_trd_out\t=\t0x%x\n",
+ layer->b_trd_out);
+ if (layer->out_trd_mode)
+ seq_printf(s, "\t\tout_trd_mode\t=\t0x%x\n",
+ layer->out_trd_mode);
+ seq_printf(s, "\t\tid\t=\t%u }\n", layer->id);
+ }
+}
+
+static void sunxi_debugfs_print_config(struct seq_file *s, u32 post_id,
+ struct setup_dispc_data *config)
+{
+ int dpy;
+
+ seq_printf(s, "adf_sunxi post_id %u = {\n", post_id);
+ for (dpy = 0; dpy < 3; dpy++) {
+ seq_printf(s, "\tlayer_num[%d] = %u\n", dpy,
+ config->layer_num[dpy]);
+ sunxi_debugfs_print_layer_info(s,
+ config->layer_num[dpy],
+ &config->layer_info[dpy][0]);
+ }
+ seq_puts(s, "}\n");
+}
+
+static int sunxi_debugfs_show(struct seq_file *s, void *unused)
+{
+ /* FIXME: Should properly lock to reduce the risk of modification
+ * while printing?
+ */
+ int post;
+
+ for (post = 0; post < DEBUG_POST_DUMP_COUNT; post++) {
+ /* Start at current buffer position +1 (oldest post in the
+ * log)
+ */
+ int pos = (sunxi.last_config_pos + post + 1)
+ % DEBUG_POST_DUMP_COUNT;
+ sunxi_debugfs_print_config(s, sunxi.last_config_id[pos],
+ &sunxi.last_config[pos]);
+ }
+ return 0;
+}
+
+static int sunxi_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sunxi_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations adf_sunxi_debugfs_fops = {
+ .open = sunxi_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int sunxi_debugfs_val_show(struct seq_file *s, void *unused)
+{
+ int line;
+
+ for (line = 0; line < VALIDATE_LOG_LINES; line++) {
+ int pos = (sunxi.validate_log_position + line + 1)
+ % VALIDATE_LOG_LINES;
+ seq_puts(s, sunxi.validate_log[pos]);
+ }
+ return 0;
+}
+
+static int sunxi_debugfs_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sunxi_debugfs_val_show, inode->i_private);
+}
+
+static const struct file_operations adf_sunxi_debugfs_val_fops = {
+ .open = sunxi_debugfs_val_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
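+/* Initialise the always-connected internal LCD interface: register it with
+ * ADF as the primary display, build a single mode from the panel dimensions
+ * reported by the disp driver, and hook up the vsync callback.
+ */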
+static int adf_init_lcd_interface(struct sunxi_interface *interface)
+{
+ int height, width;
+ int refresh = 60;
+ int err;
+
+ interface->connected = true;
+ interface->name = "LCD";
+ interface->adf_type = ADF_INTF_DSI;
+ err = adf_interface_init(&interface->interface, &sunxi.device,
+ interface->adf_type, interface->display_id,
+ ADF_INTF_FLAG_PRIMARY, &adf_sunxi_interface_ops,
+ interface->name);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to init adf interface %d (%d)\n",
+ __func__, interface->display_id, err);
+ goto err_out;
+ }
+ height = sunxi.disp_ops.get_screen_height(interface->display_id);
+ if (height < 0) {
+ dev_err(sunxi.dev, "%s: Failed to query display height (%d)\n",
+ __func__, height);
+ err = -EFAULT;
+ goto err_out;
+ }
+ width = sunxi.disp_ops.get_screen_width(interface->display_id);
+ if (width < 0) {
+ dev_err(sunxi.dev, "%s: Failed to query display width (%d)\n",
+ __func__, width);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ interface->supported_modes = kzalloc(sizeof(*interface->supported_modes),
+ GFP_KERNEL);
+ if (!interface->supported_modes) {
+ dev_err(sunxi.dev, "%s: Failed to allocate mode struct\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_out;
+ }
+ interface->num_supported_modes = 1;
+ setup_drm_mode(&interface->supported_modes[0], height, width, refresh);
+
+ err = adf_hotplug_notify_connected(&interface->interface,
+ interface->supported_modes, interface->num_supported_modes);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to notify connected (%d)\n",
+ __func__, err);
+ goto err_out;
+ }
+	/* We need to set an initial mode */
+ err = adf_interface_set_mode(&interface->interface,
+ &interface->supported_modes[0]);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed initial modeset (%d)\n",
+ __func__, err);
+ goto err_out;
+ }
+ err = sunxi.disp_ops.vsync_callback(NULL, sunxi_disp_vsync_callback);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to set vsync callback (%d)\n",
+ __func__, err);
+ goto err_out;
+ }
+ err = 0;
+err_out:
+ return err;
+}
+
+static int adf_init_hdmi_interface(struct sunxi_interface *interface)
+{
+ disp_hotplug_state hotplug_state;
+ int err;
+
+ interface->name = "HDMI";
+ interface->adf_type = ADF_INTF_HDMI;
+ hotplug_state = sunxi.disp_ops.hotplug_state(interface->display_id);
+
+ err = adf_interface_init(&interface->interface, &sunxi.device,
+ interface->adf_type, interface->display_id,
+ ADF_INTF_FLAG_EXTERNAL, &adf_sunxi_interface_ops,
+ interface->name);
+ if (err) {
+ dev_err(sunxi.dev, "%s: Failed to init adf interface %d (%d)\n",
+ __func__, interface->display_id, err);
+ goto err_out;
+ }
+
+ switch (hotplug_state) {
+ case DISP_HOTPLUG_CONNECT:
+ interface->connected = true;
+ break;
+ default:
+ dev_err(sunxi.dev, "%s: Error querying hotplug state for display id %d\n",
+ __func__, interface->display_id);
+ hotplug_state = DISP_HOTPLUG_DISCONNECT;
+		/* Fall-thru, act as if disconnected */
+ case DISP_HOTPLUG_DISCONNECT:
+ interface->connected = false;
+ break;
+ }
+ /* Call the hotplug function to setup modes */
+ sunxi_disp_hotplug_callback(interface, hotplug_state);
+
+ err = 0;
+err_out:
+ return err;
+}
+
+static void adf_init_interface(struct sunxi_interface *interface, int id)
+{
+ BUG_ON(!interface);
+ memset(interface, 0, sizeof(*interface));
+ interface->disp_type = sunxi.disp_ops.get_output_type(id);
+ interface->display_id = id;
+ dev_dbg(sunxi.dev, "%s: interface %d\n", __func__, id);
+
+ switch (interface->disp_type) {
+ default:
+ dev_err(sunxi.dev, "%s: Unsupported interface type %d for display %d\n",
+ __func__, interface->disp_type, id);
+ interface->disp_type = DISP_OUTPUT_TYPE_NONE;
+ /* Fall-thru */
+ case DISP_OUTPUT_TYPE_NONE:
+ dev_dbg(sunxi.dev, "%s: Skipping interface %d - type %d\n",
+ __func__, id, interface->disp_type);
+ interface->connected = false;
+ return;
+ case DISP_OUTPUT_TYPE_LCD:
+ adf_init_lcd_interface(interface);
+ break;
+ case DISP_OUTPUT_TYPE_HDMI:
+ adf_init_hdmi_interface(interface);
+ break;
+ }
+}
+
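+/* Probe: register the ADF device, hook into the disp composer ops, bring up
+ * the interfaces and overlay engines, create the ion client and the debugfs
+ * entries used for post/validate debugging.
+ */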
+static int adf_sunxi_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ int dpy = 0;
+ int ovl = 0;
+
+ memset(&sunxi.last_config, 0, sizeof(sunxi.last_config));
+
+ atomic_set(&sunxi.postcount, 0);
+ atomic_set(&sunxi.callbackcount, 0);
+ init_waitqueue_head(&sunxi.post_wait_queue);
+
+ sunxi.dev = &pdev->dev;
+
+ err = adf_device_init(&sunxi.device, sunxi.dev,
+ &adf_sunxi_device_ops, "sunxi_device");
+ if (err) {
+ dev_err(sunxi.dev, "Failed to init ADF device (%d)\n",
+ err);
+ goto err_out;
+ }
+
+ err = disp_get_composer_ops(&sunxi.disp_ops);
+ if (err) {
+ dev_err(sunxi.dev, "Failed to get composer ops (%d)\n",
+ err);
+ goto err_free_overlays;
+ }
+ /* Set the retire callback */
+ err = sunxi.disp_ops.set_retire_callback(sunxi_retire_callback);
+ if (err) {
+ dev_err(sunxi.dev, "Failed to set retire callback (%d)\n",
+ err);
+ goto err_free_overlays;
+ }
+	/* The HDMI output must be enabled to receive hotplug events, and it
+	 * must already have a valid mode set before being enabled.
+	 */
+ err = sunxi.disp_ops.hdmi_set_mode(1, DISP_TV_MOD_720P_60HZ);
+ if (err) {
+ dev_warn(sunxi.dev, "Failed to enable initial hdmi mode on dpy 1 (%d)\n",
+ err);
+ /* Not fatal */
+ }
+ dev_dbg(sunxi.dev, "%s: %d hdmi_enable\n", __func__, __LINE__);
+ err = sunxi.disp_ops.hdmi_enable(1);
+ if (err) {
+ dev_warn(sunxi.dev, "Failed to enable hdmi on dpy 1 (%d)\n",
+ err);
+ /* Not fatal */
+ }
+
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++)
+ adf_init_interface(&sunxi.interfaces[dpy], dpy);
+
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ for (ovl = 0; ovl < NUM_OVERLAYS; ovl++) {
+ err = adf_overlay_engine_init(
+ &sunxi.overlays[dpy][ovl].overlay,
+ &sunxi.device, &adf_sunxi_overlay_ops,
+ "sunxi_overlay_%d-%d", dpy, ovl);
+ if (err) {
+ dev_err(sunxi.dev, "Failed to init overlay %d-%d (%d)\n",
+ dpy, ovl, err);
+ goto err_free_overlays;
+ }
+ err = adf_attachment_allow(&sunxi.device,
+ &sunxi.overlays[dpy][ovl].overlay,
+ &sunxi.interfaces[dpy].interface);
+
+ if (err) {
+ dev_err(sunxi.dev, "Failed to attach overlay %d-%d (%d)\n",
+ dpy, ovl, err);
+ goto err_free_overlays;
+ }
+ }
+ }
+
+
+ sunxi.ion_heap_id = ION_HEAP_TYPE_CARVEOUT;
+
+ sunxi.ion_client = ion_client_create(idev, "adf_sunxi");
+
+ if (IS_ERR(sunxi.ion_client)) {
+ err = PTR_ERR(sunxi.ion_client);
+ dev_err(sunxi.dev, "Failed to create ion client (%d)\n",
+ err);
+ goto err_free_overlays;
+ }
+
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+ err = adf_fbdev_init(&sunxi.fbdev,
+ &sunxi.interfaces[DISPLAY_INTERNAL].interface,
+ &sunxi.overlays[DISPLAY_INTERNAL].overlay,
+ sunxi.interfaces[DISPLAY_INTERNAL].width,
+ sunxi.interfaces[DISPLAY_INTERNAL].height,
+ DRM_FORMAT_BGRA8888,
+ &adf_sunxi_fb_ops,
+ "adf_sunxi_fb");
+ if (err) {
+ dev_err(sunxi.dev, "Failed to init ADF fbdev (%d)\n", err);
+ goto err_free_ion_client;
+ }
+#endif
+
+ sunxi.debugfs_config_file = debugfs_create_file("adf_debug", S_IRUGO,
+ NULL, NULL, &adf_sunxi_debugfs_fops);
+
+ sunxi.debugfs_val_log = debugfs_create_file("adf_val_log", S_IRUGO,
+ NULL, NULL, &adf_sunxi_debugfs_val_fops);
+	dev_info(sunxi.dev, "Successfully loaded adf_sunxi\n");
+
+ return 0;
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+err_free_ion_client:
+#endif
+ ion_client_destroy(sunxi.ion_client);
+err_free_overlays:
+ for (; dpy > 0; dpy--) {
+ if (sunxi.interfaces[dpy-1].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ for (; ovl > 0; ovl--) {
+ adf_overlay_engine_destroy(
+ &sunxi.overlays[dpy-1][ovl-1].overlay);
+ }
+ }
+ dpy = MAX_DISPLAYS;
+ for (; dpy > 0; dpy--) {
+ if (sunxi.interfaces[dpy-1].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ if (sunxi.interfaces[dpy-1].disp_type == DISP_OUTPUT_TYPE_HDMI)
+			adf_sunxi_set_hotplug_state(&sunxi.interfaces[dpy-1],
+ false);
+ adf_interface_destroy(&sunxi.interfaces[dpy-1].interface);
+ }
+ adf_device_destroy(&sunxi.device);
+err_out:
+ debugfs_remove(sunxi.debugfs_config_file);
+ sunxi.debugfs_config_file = NULL;
+ debugfs_remove(sunxi.debugfs_val_log);
+ sunxi.debugfs_val_log = NULL;
+ return err;
+}
+
+static int adf_sunxi_remove(struct platform_device *pdev)
+{
+ int dpy;
+ int ovl;
+#ifdef SUPPORT_ADF_SUNXI_FBDEV
+ adf_fbdev_destroy(&sunxi.fbdev);
+#endif
+ debugfs_remove(sunxi.debugfs_config_file);
+ sunxi.debugfs_config_file = NULL;
+ debugfs_remove(sunxi.debugfs_val_log);
+ sunxi.debugfs_val_log = NULL;
+ ion_client_destroy(sunxi.ion_client);
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ for (ovl = 0; ovl < NUM_OVERLAYS; ovl++)
+ adf_overlay_engine_destroy(
+ &sunxi.overlays[dpy][ovl].overlay);
+ }
+ for (dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_NONE)
+ continue;
+ if (sunxi.interfaces[dpy].disp_type == DISP_OUTPUT_TYPE_HDMI)
+ adf_sunxi_set_hotplug_state(&sunxi.interfaces[dpy],
+ false);
+ adf_interface_destroy(&sunxi.interfaces[dpy].interface);
+ }
+ adf_device_destroy(&sunxi.device);
+ return 0;
+}
+
+static void adf_sunxi_device_release(struct device *dev)
+{
+ /* NOOP */
+}
+
+static int adf_sunxi_device_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+static int adf_sunxi_device_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_device adf_sunxi_platform_device = {
+ .name = "adf_sunxi",
+ .id = -1,
+ .dev.release = adf_sunxi_device_release,
+};
+
+struct platform_driver adf_sunxi_platform_driver = {
+ .driver.name = "adf_sunxi",
+ .probe = adf_sunxi_probe,
+ .remove = adf_sunxi_remove,
+ .suspend = adf_sunxi_device_suspend,
+ .resume = adf_sunxi_device_resume,
+};
+
+static int __init adf_sunxi_init(void)
+{
+ platform_device_register(&adf_sunxi_platform_device);
+ platform_driver_register(&adf_sunxi_platform_driver);
+ return 0;
+}
+
+static void __exit adf_sunxi_exit(void)
+{
+ platform_device_unregister(&adf_sunxi_platform_device);
+ platform_driver_unregister(&adf_sunxi_platform_driver);
+}
+
+module_init(adf_sunxi_init);
+module_exit(adf_sunxi_exit);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File adf_sunxi.h
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _ADF_SUNXI_
+#define _ADF_SUNXI_
+
+extern struct ion_device *idev;
+
+#include <video/drv_display.h>
+#define DISPLAY_INTERNAL 0
+#define DISPLAY_HDMI 1
+#define DISPLAY_EDP 2
+
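+/* Per-post configuration handed to the disp driver's dispc_gralloc_queue():
+ * up to 4 layers on each of up to 3 displays.
+ */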
+struct setup_dispc_data {
+ int layer_num[3];
+ disp_layer_info layer_info[3][4];
+ void *hConfigData;
+};
+
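+/* Callbacks exported by the sunxi disp driver, filled in by
+ * disp_get_composer_ops().
+ */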
+struct disp_composer_ops {
+ int (*get_screen_width)(u32 screen_id);
+ int (*get_screen_height)(u32 screen_id);
+ int (*get_output_type)(u32 screen_id);
+ int (*hdmi_enable)(u32 screen_id);
+ int (*hdmi_disable)(u32 screen_id);
+ int (*hdmi_set_mode)(u32 screen_id, disp_tv_mode mode);
+ int (*hdmi_get_mode)(u32 screen_id);
+ int (*hdmi_check_support_mode)(u32 screen_id, u8 mode);
+ int (*is_support_scaler_layer)(unsigned int screen_id,
+ unsigned int src_w, unsigned int src_h, unsigned int out_w,
+ unsigned int out_h);
+ int (*dispc_gralloc_queue)(struct setup_dispc_data *psDispcData);
+ int (*set_retire_callback)(void (*retire_fn)(void));
+ int (*vsync_enable)(u32 screen_id, bool enable);
+ int (*vsync_callback)(void *user_data, void (*cb_fn)(void *user_data,
+ u32 screen_id));
+ int (*hotplug_enable)(u32 screen_id, bool enable);
+ int (*hotplug_callback)(u32 screen_id, void *user_data,
+ hdmi_hotplug_callback_function cb_fn);
+ int (*hotplug_state)(u32 screen_id);
+
+};
+extern int disp_get_composer_ops(struct disp_composer_ops *ops);
+
+#endif
--- /dev/null
+config POWERVR_APOLLO
+	tristate "PowerVR Apollo test chip support"
+	depends on X86
+	default n
+	help
+	  Driver for the PCI-based PowerVR Apollo test chip.
+
+config POWERVR_ADF_PDP
+	tristate "ADF driver for Apollo PDP"
+	default y
+	depends on ADF
+	depends on POWERVR_APOLLO
+	help
+	  ADF display driver for the PDP output on the Apollo test chip.
--- /dev/null
+obj-$(CONFIG_POWERVR_APOLLO) += apollo.o
+apollo-y += apollo_drv.o ion_lma_heap.o pci_support.o
+ccflags-y += -include drivers/staging/imgtec/config_kernel.h \
+ -I$(src) \
+ -Idrivers/staging/imgtec \
+ -Idrivers/staging/imgtec/rogue \
+ -Idrivers/staging/imgtec/rogue/hwdefs \
+ -Idrivers/staging/imgtec/rogue/hwdefs/km
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File adf_pdp.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is an example ADF display driver for the testchip's PDP output
+ */
+
+/* #define SUPPORT_ADF_PDP_FBDEV */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include <drm/drm_fourcc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+
+#ifdef SUPPORT_ADF_PDP_FBDEV
+#include <video/adf_fbdev.h>
+#endif
+
+#include PVR_ANDROID_ION_HEADER
+
+/* for sync_fence_put */
+#include PVR_ANDROID_SYNC_HEADER
+
+#include "apollo_drv.h"
+#include "adf_common.h"
+#include "debugfs_dma_buf.h"
+
+#include "pdp_regs.h"
+#include "tcf_rgbpdp_regs.h"
+#include "tcf_pll.h"
+
+#include "pvrmodule.h"
+
+#define DRV_NAME APOLLO_DEVICE_NAME_PDP
+
+#ifndef ADF_PDP_WIDTH
+#define ADF_PDP_WIDTH 1280
+#endif
+
+#ifndef ADF_PDP_HEIGHT
+#define ADF_PDP_HEIGHT 720
+#endif
+
+MODULE_DESCRIPTION("APOLLO PDP display driver");
+
+static int pdp_display_width = ADF_PDP_WIDTH;
+static int pdp_display_height = ADF_PDP_HEIGHT;
+module_param(pdp_display_width, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(pdp_display_width, "PDP display width");
+module_param(pdp_display_height, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(pdp_display_height, "PDP display height");
+
+static DEFINE_SPINLOCK(gFlipLock);
+
+struct pdp_timing_data {
+ u32 h_display;
+ u32 h_back_porch;
+ u32 h_total;
+ u32 h_active_start;
+ u32 h_left_border;
+ u32 h_right_border;
+ u32 h_front_porch;
+
+ u32 v_display;
+ u32 v_back_porch;
+ u32 v_total;
+ u32 v_active_start;
+ u32 v_top_border;
+ u32 v_bottom_border;
+ u32 v_front_porch;
+ u32 v_refresh;
+ u32 clock_freq;
+};
+
+static const struct pdp_timing_data pdp_supported_modes[] = {
+ {
+ .h_display = 640,
+ .h_back_porch = 64,
+ .h_total = 800,
+ .h_active_start = 144,
+ .h_left_border = 144,
+ .h_right_border = 784,
+ .h_front_porch = 784,
+
+ .v_display = 480,
+ .v_back_porch = 7,
+ .v_total = 497,
+ .v_active_start = 16,
+ .v_top_border = 16,
+ .v_bottom_border = 496,
+ .v_front_porch = 496,
+
+ .v_refresh = 60,
+ .clock_freq = 23856000,
+ },
+ {
+ .h_display = 800,
+ .h_back_porch = 80,
+ .h_total = 1024,
+ .h_active_start = 192,
+ .h_left_border = 192,
+ .h_right_border = 992,
+ .h_front_porch = 992,
+
+ .v_display = 600,
+ .v_back_porch = 7,
+ .v_total = 621,
+ .v_active_start = 20,
+ .v_top_border = 20,
+ .v_bottom_border = 620,
+ .v_front_porch = 620,
+
+ .v_refresh = 60,
+ .clock_freq = 38154000,
+ },
+ {
+ .h_display = 1024,
+ .h_back_porch = 104,
+ .h_total = 1344,
+ .h_active_start = 264,
+ .h_left_border = 264,
+ .h_right_border = 1288,
+ .h_front_porch = 1288,
+
+ .v_display = 768,
+ .v_back_porch = 7,
+ .v_total = 795,
+ .v_active_start = 26,
+ .v_top_border = 26,
+ .v_bottom_border = 794,
+ .v_front_porch = 794,
+
+ .v_refresh = 59,
+ .clock_freq = 64108000,
+ },
+ {
+ .h_display = 1280,
+ .h_back_porch = 136,
+ .h_total = 1664,
+ .h_active_start = 328,
+ .h_left_border = 328,
+ .h_right_border = 1608,
+ .h_front_porch = 1608,
+
+ .v_display = 720,
+ .v_back_porch = 7,
+ .v_total = 745,
+ .v_active_start = 24,
+ .v_top_border = 24,
+ .v_bottom_border = 744,
+ .v_front_porch = 744,
+
+ .v_refresh = 59,
+ .clock_freq = 74380000,
+ },
+ {
+ .h_display = 1280,
+ .h_back_porch = 136,
+ .h_total = 1680,
+ .h_active_start = 336,
+ .h_left_border = 336,
+ .h_right_border = 1616,
+ .h_front_porch = 1616,
+
+ .v_display = 768,
+ .v_back_porch = 7,
+ .v_total = 795,
+ .v_active_start = 26,
+ .v_top_border = 26,
+ .v_bottom_border = 794,
+ .v_front_porch = 794,
+
+ .v_refresh = 59,
+ .clock_freq = 80136000,
+ },
+ {
+ .h_display = 1280,
+ .h_back_porch = 136,
+ .h_total = 1680,
+ .h_active_start = 336,
+ .h_left_border = 336,
+ .h_right_border = 1616,
+ .h_front_porch = 1616,
+
+ .v_display = 800,
+ .v_back_porch = 7,
+ .v_total = 828,
+ .v_active_start = 27,
+ .v_top_border = 27,
+ .v_bottom_border = 827,
+ .v_front_porch = 827,
+
+ .v_refresh = 59,
+ .clock_freq = 83462000,
+ },
+ {
+ .h_display = 1280,
+ .h_back_porch = 136,
+ .h_total = 1712,
+ .h_active_start = 352,
+ .h_left_border = 352,
+ .h_right_border = 1632,
+ .h_front_porch = 1632,
+
+ .v_display = 1024,
+ .v_back_porch = 7,
+ .v_total = 1059,
+ .v_active_start = 34,
+ .v_top_border = 34,
+ .v_bottom_border = 1058,
+ .v_front_porch = 1058,
+
+ .v_refresh = 60,
+ .clock_freq = 108780000,
+ },
+ {}
+};
+
+
+struct adf_pdp_device {
+ struct ion_client *ion_client;
+
+ struct adf_device adf_device;
+ struct adf_interface adf_interface;
+ struct adf_overlay_engine adf_overlay;
+#ifdef SUPPORT_ADF_PDP_FBDEV
+ struct adf_fbdev adf_fbdev;
+#endif
+
+ struct platform_device *pdev;
+
+ struct apollo_pdp_platform_data *pdata;
+
+ void __iomem *regs;
+ resource_size_t regs_size;
+
+ void __iomem *pll_regs;
+ resource_size_t pll_regs_size;
+
+ struct drm_mode_modeinfo *supported_modes;
+ int num_supported_modes;
+
+ const struct pdp_timing_data *current_timings;
+
+ atomic_t refcount;
+
+ atomic_t num_validates;
+ int num_posts;
+
+ atomic_t vsync_triggered;
+ wait_queue_head_t vsync_wait_queue;
+ atomic_t requested_vsync_state;
+ atomic_t vsync_state;
+
+ /* This is set when the last client has released this device, causing
+ * all outstanding posts to be ignored
+ */
+ atomic_t released;
+
+ struct {
+ u32 str1surf;
+ u32 str1posn;
+ u32 str1addrctrl;
+ } flip_registers;
+};
+
+static const u32 pdp_supported_formats[] = {
+ DRM_FORMAT_BGRA8888,
+};
+#define NUM_SUPPORTED_FORMATS 1
+
+static const struct {
+ u32 drm_format;
+ u32 bytes_per_pixel;
+ u32 pixfmt_word;
+} pdp_format_table[] = {
+ { DRM_FORMAT_BGRA8888, 4, DCPDP_STR1SURF_FORMAT_ARGB8888 },
+ {},
+};
+
+static int pdp_mode_count(struct adf_pdp_device *pdp)
+{
+ int i = 0;
+
+ while (pdp_supported_modes[i].h_display)
+ i++;
+ return i;
+}
+
+static int pdp_mode_id(struct adf_pdp_device *pdp, u32 height, u32 width)
+{
+ int i;
+
+ for (i = 0; pdp_supported_modes[i].h_display; i++) {
+ const struct pdp_timing_data *tdata = &pdp_supported_modes[i];
+
+ if (tdata->h_display == width && tdata->v_display == height)
+ return i;
+ }
+ dev_err(&pdp->pdev->dev, "Failed to find matching mode for %dx%d\n",
+ width, height);
+ return -1;
+}
+
+static const struct pdp_timing_data *pdp_timing_data(
+ struct adf_pdp_device *pdp, int mode_id)
+{
+ if (mode_id >= pdp_mode_count(pdp) || mode_id < 0)
+ return NULL;
+ return &pdp_supported_modes[mode_id];
+}
+
+static void pdp_mode_to_drm_mode(struct adf_pdp_device *pdp, int mode_id,
+ struct drm_mode_modeinfo *drm_mode)
+{
+ const struct pdp_timing_data *pdp_mode = pdp_timing_data(pdp, mode_id);
+
+ BUG_ON(pdp_mode == NULL);
+ memset(drm_mode, 0, sizeof(*drm_mode));
+
+ drm_mode->hdisplay = pdp_mode->h_display;
+ drm_mode->vdisplay = pdp_mode->v_display;
+ drm_mode->vrefresh = pdp_mode->v_refresh;
+
+ adf_modeinfo_set_name(drm_mode);
+}
+
+static u32 pdp_read_reg(struct adf_pdp_device *pdp, resource_size_t reg_offset)
+{
+ BUG_ON(reg_offset > pdp->regs_size-4);
+ return ioread32(pdp->regs + reg_offset);
+}
+
+static void pdp_write_reg(struct adf_pdp_device *pdp,
+ resource_size_t reg_offset, u32 reg_value)
+{
+ BUG_ON(reg_offset > pdp->regs_size-4);
+ iowrite32(reg_value, pdp->regs + reg_offset);
+}
+
+static void pll_write_reg(struct adf_pdp_device *pdp,
+ resource_size_t reg_offset, u32 reg_value)
+{
+ BUG_ON(reg_offset < TCF_PLL_PLL_PDP_CLK0);
+ BUG_ON(reg_offset > pdp->pll_regs_size + TCF_PLL_PLL_PDP_CLK0 - 4);
+ iowrite32(reg_value, pdp->pll_regs +
+ reg_offset - TCF_PLL_PLL_PDP_CLK0);
+}
+
+static void pdp_devres_release(struct device *dev, void *res)
+{
+ /* No extra cleanup needed */
+}
+
+static u32 pdp_format_bpp(u32 drm_format)
+{
+ int i;
+
+ for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
+ if (pdp_format_table[i].drm_format == drm_format)
+ return pdp_format_table[i].bytes_per_pixel;
+ }
+ WARN(1, "Unsupported drm format");
+ return 0;
+}
+
+static u32 pdp_format(u32 drm_format)
+{
+ int i;
+
+ for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
+ if (pdp_format_table[i].drm_format == drm_format)
+ return pdp_format_table[i].pixfmt_word;
+ }
+ WARN(1, "Unsupported drm format");
+ return 0;
+}
+
+static void pdp_enable_scanout(struct adf_pdp_device *pdp)
+{
+ u32 reg_value;
+ /* Turn on scanout */
+ reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+ reg_value &= ~(STR1STREN_MASK);
+ reg_value |= 0x1 << STR1STREN_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, reg_value);
+}
+
+static void pdp_disable_scanout(struct adf_pdp_device *pdp)
+{
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, 0);
+}
+
+static bool pdp_vsync_triggered(struct adf_pdp_device *pdp)
+{
+ return atomic_read(&pdp->vsync_triggered) == 1;
+}
+
+static void pdp_enable_vsync(struct adf_pdp_device *pdp)
+{
+ u32 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
+
+ reg_value |= (0x1 << INTEN_VBLNK1_SHIFT);
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, reg_value);
+}
+
+static void pdp_disable_vsync(struct adf_pdp_device *pdp)
+{
+ u32 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
+
+ reg_value &= ~(0x1 << INTEN_VBLNK1_SHIFT);
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, reg_value);
+}
+
+static void pdp_enable_interrupt(struct adf_pdp_device *pdp)
+{
+ int err = 0;
+ err = apollo_enable_interrupt(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_PDP);
+ if (err) {
+ dev_err(&pdp->pdev->dev,
+ "apollo_enable_interrupt failed (%d)\n", err);
+ }
+}
+
+static void pdp_disable_interrupt(struct adf_pdp_device *pdp)
+{
+ int err = 0;
+ err = apollo_disable_interrupt(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_PDP);
+ if (err) {
+ dev_err(&pdp->pdev->dev,
+ "apollo_disable_interrupt failed (%d)\n", err);
+ }
+}
+
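+/* Build the stream register values for the new buffer (they are applied on
+ * the next vblank by the interrupt handler) and wait, with a timeout, for
+ * that vblank so the previous buffer is known to be off-screen.
+ */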
+static void pdp_post(struct adf_device *adf_dev, struct adf_post *cfg,
+ void *driver_state)
+{
+ int num_validates_snapshot = *(int *)driver_state;
+ dma_addr_t buf_addr;
+ u32 reg_value = 0;
+ unsigned long flags;
+
+ /* Set vsync wait timeout to 4x expected vsync */
+ struct adf_pdp_device *pdp = devres_find(adf_dev->dev,
+ pdp_devres_release, NULL, NULL);
+ long timeout =
+ msecs_to_jiffies((1000 / pdp->current_timings->v_refresh) * 4);
+
+ /* Null-flip handling, used to push buffers off screen during an error
+ * state to stop them blocking subsequent rendering
+ */
+ if (cfg->n_bufs == 0 || atomic_read(&pdp->released) == 1)
+ goto out_update_num_posts;
+
+ WARN_ON(cfg->n_bufs != 1);
+ WARN_ON(cfg->mappings->sg_tables[0]->nents != 1);
+
+ spin_lock_irqsave(&gFlipLock, flags);
+
+ buf_addr = sg_phys(cfg->mappings->sg_tables[0]->sgl);
+ /* Convert the cpu address to a device address */
+ buf_addr -= pdp->pdata->memory_base;
+
+ debugfs_dma_buf_set(cfg->bufs[0].dma_bufs[0]);
+
+ /* Set surface register w/height, width & format */
+ reg_value = (cfg->bufs[0].w-1) << STR1WIDTH_SHIFT;
+ reg_value |= (cfg->bufs[0].h-1) << STR1HEIGHT_SHIFT;
+ reg_value |= pdp_format(cfg->bufs[0].format) << STR1PIXFMT_SHIFT;
+ pdp->flip_registers.str1surf = reg_value;
+
+ /* Set stride register */
+ reg_value = (cfg->bufs[0].pitch[0] >> DCPDP_STR1POSN_STRIDE_SHIFT)-1;
+ pdp->flip_registers.str1posn = reg_value;
+
+ /* Set surface address without resetting any other bits in the
+ * register
+ */
+ reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+ reg_value &= ~(STR1BASE_MASK);
+ reg_value |= (buf_addr >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT)
+ & STR1BASE_MASK;
+
+ pdp->flip_registers.str1addrctrl = reg_value;
+ atomic_set(&pdp->vsync_triggered, 0);
+
+ spin_unlock_irqrestore(&gFlipLock, flags);
+
+ /* Wait until the buffer is on-screen, so we know the previous buffer
+ * has been retired and off-screen.
+ *
+ * If vsync was already off when this post was serviced, we need to
+ * enable the vsync again briefly so the register updates we shadowed
+ * above get applied and we don't signal the fence prematurely. One
+ * vsync afterwards, we'll disable the vsync again.
+ */
+ if (!atomic_xchg(&pdp->vsync_state, 1))
+ pdp_enable_vsync(pdp);
+
+ if (wait_event_timeout(pdp->vsync_wait_queue,
+ pdp_vsync_triggered(pdp), timeout) == 0) {
+ dev_err(&pdp->pdev->dev, "Post VSync wait timeout\n");
+ /* Undefined behaviour if this times out */
+ }
+out_update_num_posts:
+ pdp->num_posts = num_validates_snapshot;
+}
+
+static bool pdp_supports_event(struct adf_obj *obj, enum adf_event_type type)
+{
+ switch (obj->type) {
+ case ADF_OBJ_INTERFACE:
+ {
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+}
+
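+/*
+ * Vblank interrupt handler: applies the registers shadowed by pdp_post,
+ * notifies ADF of the vsync and handles the deferred vsync disable used
+ * when the display is idle.
+ */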
+static void pdp_irq_handler(void *data)
+{
+ struct adf_pdp_device *pdp = data;
+ unsigned long flags;
+ u32 int_status = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);
+
+ if (int_status & INTS_VBLNK1_MASK)
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR,
+ (0x1 << INTCLR_VBLNK1_SHIFT));
+
+ spin_lock_irqsave(&gFlipLock, flags);
+
+ /* If we're idle, and a vsync disable was requested, do it now.
+ * This code assumes that the HWC will always re-enable vsync
+ * explicitly before posting new configurations.
+ */
+ if (atomic_read(&pdp->num_validates) == pdp->num_posts) {
+ if (!atomic_read(&pdp->requested_vsync_state)) {
+ pdp_disable_vsync(pdp);
+ atomic_set(&pdp->vsync_state, 0);
+ }
+ }
+
+ if (int_status & INTS_VBLNK1_MASK) {
+ /* Write the registers for the next buffer to display */
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF,
+ pdp->flip_registers.str1surf);
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_PDP_STR1POSN,
+ pdp->flip_registers.str1posn);
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL,
+ pdp->flip_registers.str1addrctrl);
+ pdp_enable_scanout(pdp);
+
+ adf_vsync_notify(&pdp->adf_interface, ktime_get());
+ atomic_set(&pdp->vsync_triggered, 1);
+ wake_up(&pdp->vsync_wait_queue);
+ }
+
+ spin_unlock_irqrestore(&gFlipLock, flags);
+}
+
+static void pdp_set_event(struct adf_obj *obj, enum adf_event_type type,
+ bool enabled)
+{
+ struct adf_pdp_device *pdp;
+ bool old;
+
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ {
+ pdp = devres_find(obj->parent->dev, pdp_devres_release,
+ NULL, NULL);
+ atomic_set(&pdp->requested_vsync_state, enabled);
+ if (enabled) {
+ old = atomic_xchg(&pdp->vsync_state, enabled);
+ if (!old)
+ pdp_enable_vsync(pdp);
+ }
+ break;
+ }
+ default:
+ BUG();
+ }
+}
+
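+/*
+ * Program the PDP pixel clock: write the target frequency in MHz, select
+ * the CLK1TO5 divider setting (different for clocks below 50MHz) and pulse
+ * the DRP "GO" register to apply the new settings.
+ */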
+static void pdp_set_clocks(struct adf_pdp_device *pdp, u32 clock_freq_hz)
+{
+ u32 clock_freq_mhz = (clock_freq_hz + 500000) / 1000000;
+
+ pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK0, clock_freq_mhz);
+ if (clock_freq_mhz >= 50)
+ pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK1TO5, 0);
+ else
+ pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK1TO5, 0x3);
+
+ pll_write_reg(pdp, TCF_PLL_PLL_PDP_DRP_GO, 1);
+ udelay(1000);
+ pll_write_reg(pdp, TCF_PLL_PLL_PDP_DRP_GO, 0);
+}
+
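+/*
+ * Program the sync generator, data-enable and fetch/event timings for the
+ * requested mode, reprogramming the pixel clock PLL to match. Scan-out and
+ * sync generation are disabled while the registers are updated.
+ */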
+static int pdp_modeset(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ u32 reg_value = 0;
+ int err = 0;
+ struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
+ pdp_devres_release, NULL, NULL);
+ int mode_id = pdp_mode_id(pdp, mode->vdisplay, mode->hdisplay);
+ const struct pdp_timing_data *tdata = pdp_timing_data(pdp, mode_id);
+
+ if (!tdata) {
+ dev_err(&pdp->pdev->dev, "Failed to find mode for %ux%u\n",
+ mode->hdisplay, mode->vdisplay);
+ err = -ENXIO;
+ goto err_out;
+ }
+ /* Disable scanout */
+ pdp_disable_scanout(pdp);
+ /* Disable sync gen */
+ reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+ reg_value &= ~(SYNCACTIVE_MASK);
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
+
+ pdp_set_clocks(pdp, tdata->clock_freq);
+
+ if (pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL)
+ != 0x0000C010) {
+ /* Buffer request threshold */
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL,
+ 0x00001C10);
+ }
+
+ /* Border colour */
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, 0x00005544);
+
+ /* Update control */
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, 0);
+
+ /* Set hsync */
+ reg_value = tdata->h_back_porch << HBPS_SHIFT;
+ reg_value |= tdata->h_total << HT_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, reg_value);
+
+ reg_value = tdata->h_active_start << HAS_SHIFT;
+ reg_value |= tdata->h_left_border << HLBS_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, reg_value);
+
+ reg_value = tdata->h_front_porch << HFPS_SHIFT;
+ reg_value |= tdata->h_right_border << HRBS_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, reg_value);
+
+ /* Set vsync */
+ reg_value = tdata->v_back_porch << VBPS_SHIFT;
+ reg_value |= tdata->v_total << VT_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, reg_value);
+
+ reg_value = tdata->v_active_start << VAS_SHIFT;
+ reg_value |= tdata->v_top_border << VTBS_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, reg_value);
+
+ reg_value = tdata->v_front_porch << VFPS_SHIFT;
+ reg_value |= tdata->v_bottom_border << VBBS_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, reg_value);
+
+ /* Horizontal data enable */
+ reg_value = tdata->h_active_start << HDES_SHIFT;
+ reg_value |= tdata->h_front_porch << HDEF_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, reg_value);
+
+ /* Vertical data enable */
+ reg_value = tdata->v_active_start << VDES_SHIFT;
+ reg_value |= tdata->v_front_porch << VDEF_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, reg_value);
+
+ /* Vertical event start and vertical fetch start */
+ reg_value = tdata->v_back_porch << VFETCH_SHIFT;
+ reg_value |= tdata->v_front_porch << VEVENT_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, reg_value);
+
+ /* Enable sync gen last and set up polarities of sync/blank */
+ reg_value = 0x1 << SYNCACTIVE_SHIFT;
+ reg_value |= 0x1 << FIELDPOL_SHIFT;
+ reg_value |= 0x1 << BLNKPOL_SHIFT;
+ reg_value |= 0x1 << VSPOL_SHIFT;
+ reg_value |= 0x1 << HSPOL_SHIFT;
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
+
+ intf->current_mode = *mode;
+ pdp->current_timings = tdata;
+
+err_out:
+ return err;
+}
+
+static int pdp_blank(struct adf_interface *intf,
+ u8 state)
+{
+ u32 reg_value;
+ struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
+ pdp_devres_release, NULL, NULL);
+
+ if (state != DRM_MODE_DPMS_OFF &&
+ state != DRM_MODE_DPMS_ON)
+ return -EINVAL;
+
+ reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+ switch (state) {
+ case DRM_MODE_DPMS_OFF:
+ reg_value |= 0x1 << POWERDN_SHIFT;
+ break;
+ case DRM_MODE_DPMS_ON:
+ reg_value &= ~(POWERDN_MASK);
+ break;
+ }
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
+
+ return 0;
+}
+
+static int pdp_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
+ u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+ struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
+ pdp_devres_release, NULL, NULL);
+ int err = 0;
+ u32 size = w * h * pdp_format_bpp(format);
+ struct ion_handle *hdl = ion_alloc(pdp->ion_client, size, 0,
+ (1 << pdp->pdata->ion_heap_id), 0);
+ if (IS_ERR(hdl)) {
+ err = PTR_ERR(hdl);
+ dev_err(&pdp->pdev->dev, "ion_alloc failed (%d)\n", err);
+ goto err_out;
+ }
+ *dma_buf = ion_share_dma_buf(pdp->ion_client, hdl);
+ if (IS_ERR(*dma_buf)) {
+ err = PTR_ERR(*dma_buf);
+ dev_err(&pdp->pdev->dev,
+ "ion_share_dma_buf failed (%d)\n", err);
+ goto err_free_buffer;
+ }
+ *pitch = w * pdp_format_bpp(format);
+ *offset = 0;
+err_free_buffer:
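+ /* On success the exported dma_buf keeps its own reference to the
+ * underlying allocation, so the local ion handle can be dropped here
+ * in all cases.
+ */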
+ ion_free(pdp->ion_client, hdl);
+err_out:
+ return err;
+}
+
+static int pdp_describe_simple_post(struct adf_interface *intf,
+ struct adf_buffer *fb, void *data, size_t *size)
+{
+ struct adf_post_ext *post_ext = data;
+ static int post_id;
+
+ struct drm_clip_rect full_screen = {
+ .x2 = ADF_PDP_WIDTH,
+ .y2 = ADF_PDP_HEIGHT,
+ };
+
+ /* NOTE: an upstream ADF bug means we can't test *size instead */
+ BUG_ON(sizeof(struct adf_post_ext) +
+ 1 * sizeof(struct adf_buffer_config_ext)
+ > ADF_MAX_CUSTOM_DATA_SIZE);
+
+ *size = sizeof(struct adf_post_ext) +
+ 1 * sizeof(struct adf_buffer_config_ext);
+
+ post_ext->post_id = ++post_id;
+
+ post_ext->bufs_ext[0].crop = full_screen;
+ post_ext->bufs_ext[0].display = full_screen;
+ post_ext->bufs_ext[0].transform = ADF_BUFFER_TRANSFORM_NONE_EXT;
+ post_ext->bufs_ext[0].blend_type = ADF_BUFFER_BLENDING_PREMULT_EXT;
+ post_ext->bufs_ext[0].plane_alpha = 0xff;
+
+ return 0;
+}
+
+static int
+adf_pdp_open(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_device *dev =
+ (struct adf_device *)obj->parent;
+ struct adf_pdp_device *pdp = devres_find(dev->dev,
+ pdp_devres_release, NULL, NULL);
+ atomic_inc(&pdp->refcount);
+ atomic_set(&pdp->released, 0);
+ return 0;
+}
+
+static void
+adf_pdp_release(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_device *dev =
+ (struct adf_device *)obj->parent;
+ struct adf_pdp_device *pdp = devres_find(dev->dev,
+ pdp_devres_release, NULL, NULL);
+ struct sync_fence *release_fence;
+
+ if (atomic_dec_return(&pdp->refcount))
+ return;
+
+ /* Make sure we have no outstanding posts waiting */
+ atomic_set(&pdp->released, 1);
+ atomic_set(&pdp->requested_vsync_state, 0);
+ atomic_set(&pdp->vsync_triggered, 1);
+ wake_up_all(&pdp->vsync_wait_queue);
+ /* This special "null" flip works around a problem with ADF
+ * which leaves buffers pinned by the display engine even
+ * after all ADF clients have closed.
+ *
+ * The "null" flip is pipelined like any other. The user won't
+ * be able to unload this module until it has been posted.
+ */
+ release_fence = adf_device_post(dev, NULL, 0, NULL, 0, NULL, 0);
+ if (IS_ERR_OR_NULL(release_fence)) {
+ dev_err(dev->dev,
+ "Failed to queue null flip command (err=%d).\n",
+ (int)PTR_ERR(release_fence));
+ return;
+ }
+
+ sync_fence_put(release_fence);
+}
+
+static int pdp_validate(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state)
+{
+ struct adf_pdp_device *pdp = devres_find(dev->dev,
+ pdp_devres_release, NULL, NULL);
+ int err = adf_img_validate_simple(dev, cfg, driver_state);
+
+ if (err == 0 && cfg->mappings) {
+ /* We store a snapshot of num_validates in driver_state at the
+ * time validate was called, which will be passed to the post
+ * function. This snapshot is copied into (i.e. overwrites)
+ * num_posts, rather then simply incrementing num_posts, to
+ * handle cases e.g. during fence timeouts where validates
+ * are called without corresponding posts.
+ */
+ int *validates = kmalloc(sizeof(*validates), GFP_KERNEL);
+
+ if (!validates)
+ return -ENOMEM;
+ *validates = atomic_inc_return(&pdp->num_validates);
+ *driver_state = validates;
+ } else {
+ *driver_state = NULL;
+ }
+ return err;
+}
+
+static void pdp_state_free(struct adf_device *dev, void *driver_state)
+{
+ kfree(driver_state);
+}
+
+static struct adf_device_ops adf_pdp_device_ops = {
+ .owner = THIS_MODULE,
+ .base = {
+ .open = adf_pdp_open,
+ .release = adf_pdp_release,
+ .ioctl = adf_img_ioctl,
+ },
+ .state_free = pdp_state_free,
+ .validate = pdp_validate,
+ .post = pdp_post,
+};
+
+static struct adf_interface_ops adf_pdp_interface_ops = {
+ .base = {
+ .supports_event = pdp_supports_event,
+ .set_event = pdp_set_event,
+ },
+ .modeset = pdp_modeset,
+ .blank = pdp_blank,
+ .alloc_simple_buffer = pdp_alloc_simple_buffer,
+ .describe_simple_post = pdp_describe_simple_post,
+};
+
+static struct adf_overlay_engine_ops adf_pdp_overlay_ops = {
+ .supported_formats = &pdp_supported_formats[0],
+ .n_supported_formats = NUM_SUPPORTED_FORMATS,
+};
+
+#ifdef SUPPORT_ADF_PDP_FBDEV
+static struct fb_ops adf_pdp_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = adf_fbdev_open,
+ .fb_release = adf_fbdev_release,
+ .fb_check_var = adf_fbdev_check_var,
+ .fb_set_par = adf_fbdev_set_par,
+ .fb_blank = adf_fbdev_blank,
+ .fb_pan_display = adf_fbdev_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = adf_fbdev_mmap,
+};
+#endif
+
+static int adf_pdp_probe_device(struct platform_device *pdev)
+{
+ struct adf_pdp_device *pdp;
+ int err = 0;
+ int i, default_mode_id;
+ struct resource *registers;
+ struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
+ struct apollo_pdp_platform_data *pdata = pdev->dev.platform_data;
+
+ pdp = devres_alloc(pdp_devres_release, sizeof(*pdp),
+ GFP_KERNEL);
+ if (!pdp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ devres_add(&pdev->dev, pdp);
+
+ pdp->pdata = pdata;
+ pdp->pdev = pdev;
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable PDP pci device (%d)\n", err);
+ goto err_out;
+ }
+
+ atomic_set(&pdp->refcount, 0);
+ atomic_set(&pdp->num_validates, 0);
+ pdp->num_posts = 0;
+
+ pdp->ion_client = ion_client_create(pdata->ion_device, "adf_pdp");
+ if (IS_ERR(pdp->ion_client)) {
+ err = PTR_ERR(pdp->ion_client);
+ dev_err(&pdev->dev,
+ "Failed to create PDP ION client (%d)\n", err);
+ goto err_disable_pci;
+ }
+
+ registers = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "pdp-regs");
+ pdp->regs = devm_ioremap_resource(&pdev->dev, registers);
+ if (IS_ERR(pdp->regs)) {
+ err = PTR_ERR(pdp->regs);
+ dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
+ goto err_destroy_ion_client;
+ }
+ pdp->regs_size = resource_size(registers);
+
+ registers = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "pll-regs");
+ pdp->pll_regs = devm_ioremap_resource(&pdev->dev, registers);
+ if (IS_ERR(pdp->pll_regs)) {
+ err = PTR_ERR(pdp->pll_regs);
+ dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
+ goto err_destroy_ion_client;
+ }
+ pdp->pll_regs_size = resource_size(registers);
+
+ err = adf_device_init(&pdp->adf_device, &pdp->pdev->dev,
+ &adf_pdp_device_ops, "pdp_device");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF device (%d)\n", err);
+ goto err_destroy_ion_client;
+ }
+
+ err = adf_interface_init(&pdp->adf_interface, &pdp->adf_device,
+ ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY, &adf_pdp_interface_ops,
+ "pdp_interface");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF interface (%d)\n", err);
+ goto err_destroy_adf_device;
+ }
+
+ err = adf_overlay_engine_init(&pdp->adf_overlay, &pdp->adf_device,
+ &adf_pdp_overlay_ops, "pdp_overlay");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF overlay (%d)\n", err);
+ goto err_destroy_adf_interface;
+ }
+
+ err = adf_attachment_allow(&pdp->adf_device, &pdp->adf_overlay,
+ &pdp->adf_interface);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to attach overlay (%d)\n", err);
+ goto err_destroy_adf_overlay;
+ }
+
+ pdp->num_supported_modes = pdp_mode_count(pdp);
+ pdp->supported_modes = kcalloc(pdp->num_supported_modes,
+ sizeof(*pdp->supported_modes), GFP_KERNEL);
+
+ if (!pdp->supported_modes) {
+ dev_err(&pdev->dev, "Failed to allocate supported modeinfo structs\n");
+ err = -ENOMEM;
+ goto err_destroy_adf_overlay;
+ }
+
+ for (i = 0; i < pdp->num_supported_modes; i++)
+ pdp_mode_to_drm_mode(pdp, i, &pdp->supported_modes[i]);
+
+ default_mode_id = pdp_mode_id(pdp, pdp_display_height,
+ pdp_display_width);
+ if (default_mode_id == -1) {
+ default_mode_id = 0;
+ dev_err(&pdev->dev, "No modeline found for requested display size (%dx%d)\n",
+ pdp_display_width, pdp_display_height);
+ }
+
+ /* Initial modeset... */
+ err = pdp_modeset(&pdp->adf_interface,
+ &pdp->supported_modes[default_mode_id]);
+ if (err) {
+ dev_err(&pdev->dev, "Initial modeset failed (%d)\n", err);
+ goto err_destroy_modelist;
+ }
+
+ err = adf_hotplug_notify_connected(&pdp->adf_interface,
+ pdp->supported_modes, pdp->num_supported_modes);
+ if (err) {
+ dev_err(&pdev->dev, "Initial hotplug notify failed (%d)\n",
+ err);
+ goto err_destroy_modelist;
+ }
+ err = apollo_set_interrupt_handler(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_PDP,
+ pdp_irq_handler, pdp);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set interrupt handler (%d)\n",
+ err);
+ goto err_destroy_modelist;
+ }
+#ifdef SUPPORT_ADF_PDP_FBDEV
+ err = adf_fbdev_init(&pdp->adf_fbdev, &pdp->adf_interface,
+ &pdp->adf_overlay, pdp_display_width,
+ pdp_display_height, DRM_FORMAT_BGRA8888,
+ &adf_pdp_fb_ops, "adf_pdp_fb");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF fbdev (%d)\n", err);
+ goto err_destroy_modelist;
+ }
+#endif
+
+ init_waitqueue_head(&pdp->vsync_wait_queue);
+ atomic_set(&pdp->requested_vsync_state, 0);
+ atomic_set(&pdp->vsync_state, 0);
+
+ if (debugfs_dma_buf_init("pdp_raw"))
+ dev_err(&pdev->dev, "Failed to create debug fs file for raw access\n");
+
+ pdp_enable_interrupt(pdp);
+
+ return err;
+err_destroy_modelist:
+ kfree(pdp->supported_modes);
+err_destroy_adf_overlay:
+ adf_overlay_engine_destroy(&pdp->adf_overlay);
+err_destroy_adf_interface:
+ adf_interface_destroy(&pdp->adf_interface);
+err_destroy_adf_device:
+ adf_device_destroy(&pdp->adf_device);
+err_destroy_ion_client:
+ ion_client_destroy(pdp->ion_client);
+err_disable_pci:
+ pci_disable_device(pci_dev);
+err_out:
+ dev_err(&pdev->dev, "Failed to initialise PDP device\n");
+ return err;
+}
+
+static int adf_pdp_remove_device(struct platform_device *pdev)
+{
+ int err = 0;
+ struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
+ struct adf_pdp_device *pdp = devres_find(&pdev->dev, pdp_devres_release,
+ NULL, NULL);
+
+ debugfs_dma_buf_deinit();
+
+ pdp_disable_scanout(pdp);
+
+ pdp_disable_vsync(pdp);
+ pdp_disable_interrupt(pdp);
+ apollo_set_interrupt_handler(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_PDP,
+ NULL, NULL);
+ /* Disable scanout */
+ pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, 0);
+ kfree(pdp->supported_modes);
+#ifdef SUPPORT_ADF_PDP_FBDEV
+ adf_fbdev_destroy(&pdp->adf_fbdev);
+#endif
+ adf_overlay_engine_destroy(&pdp->adf_overlay);
+ adf_interface_destroy(&pdp->adf_interface);
+ adf_device_destroy(&pdp->adf_device);
+ ion_client_destroy(pdp->ion_client);
+ pci_disable_device(pci_dev);
+ return err;
+}
+
+static void adf_pdp_shutdown_device(struct platform_device *pdev)
+{
+ /* No cleanup needed, all done in remove_device */
+}
+
+static struct platform_device_id pdp_platform_device_id_table[] = {
+ { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 },
+ { },
+};
+
+static struct platform_driver pdp_platform_driver = {
+ .probe = adf_pdp_probe_device,
+ .remove = adf_pdp_remove_device,
+ .shutdown = adf_pdp_shutdown_device,
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pdp_platform_device_id_table,
+};
+
+static int __init adf_pdp_init(void)
+{
+ return platform_driver_register(&pdp_platform_driver);
+}
+
+static void __exit adf_pdp_exit(void)
+{
+ platform_driver_unregister(&pdp_platform_driver);
+}
+
+module_init(adf_pdp_init);
+module_exit(adf_pdp_exit);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File adf_pdp.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is an example ADF display driver for the TC5 testchip's PDP with
+ * FBDC support
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include <drm/drm_fourcc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+
+#include PVR_ANDROID_ION_HEADER
+
+/* for sync_fence_put */
+#include PVR_ANDROID_SYNC_HEADER
+
+#include "apollo_drv.h"
+#include "adf_common.h"
+#include "debugfs_dma_buf.h"
+
+#include "pvrmodule.h"
+
+#include "pdp_tc5_regs.h"
+#include "pdp_tc5_fbdc_regs.h"
+
+#define DRV_NAME APOLLO_DEVICE_NAME_PDP
+
+#ifndef ADF_PDP_WIDTH
+#define ADF_PDP_WIDTH 1280
+#endif
+
+#ifndef ADF_PDP_HEIGHT
+#define ADF_PDP_HEIGHT 720
+#endif
+
+#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0')
+
+MODULE_DESCRIPTION("APOLLO TC5 PDP display driver");
+
+static int pdp_display_width = ADF_PDP_WIDTH;
+static int pdp_display_height = ADF_PDP_HEIGHT;
+module_param(pdp_display_width, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(pdp_display_width, "PDP display width");
+module_param(pdp_display_height, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(pdp_display_height, "PDP display height");
+
+static DEFINE_SPINLOCK(gFlipLock);
+
+struct pdp_timing_data {
+ u32 h_display;
+ u32 h_back_porch;
+ u32 h_total;
+ u32 h_active_start;
+ u32 h_left_border;
+ u32 h_right_border;
+ u32 h_front_porch;
+
+ u32 v_display;
+ u32 v_back_porch;
+ u32 v_total;
+ u32 v_active_start;
+ u32 v_top_border;
+ u32 v_bottom_border;
+ u32 v_front_porch;
+ u32 v_refresh;
+};
+
+static const struct pdp_timing_data pdp_supported_modes[] = {
+ {
+ .h_display = 1280,
+ .h_back_porch = 40,
+ .h_total = 1650,
+ .h_active_start = 260,
+ .h_left_border = 260,
+ .h_right_border = 1540,
+ .h_front_porch = 1540,
+
+ .v_display = 720,
+ .v_back_porch = 5,
+ .v_total = 750,
+ .v_active_start = 25,
+ .v_top_border = 25,
+ .v_bottom_border = 745,
+ .v_front_porch = 745,
+
+ .v_refresh = 60,
+ },
+ {}
+};
+
+struct adf_pdp_device {
+ struct ion_client *ion_client;
+
+ struct adf_device adf_device;
+ struct adf_interface adf_interface;
+ struct adf_overlay_engine adf_overlay;
+
+ struct platform_device *pdev;
+
+ struct apollo_pdp_platform_data *pdata;
+
+ void __iomem *regs;
+ resource_size_t regs_size;
+
+ void __iomem *fbdc_regs;
+ resource_size_t fbdc_regs_size;
+
+ void __iomem *i2c_regs;
+ resource_size_t i2c_regs_size;
+
+ struct drm_mode_modeinfo *supported_modes;
+ int num_supported_modes;
+
+ atomic_t refcount;
+
+ atomic_t num_validates;
+ int num_posts;
+
+ atomic_t vsync_triggered;
+ wait_queue_head_t vsync_wait_queue;
+ atomic_t requested_vsync_state;
+ atomic_t vsync_state;
+
+ const struct pdp_timing_data *current_timings;
+ u32 current_drm_format;
+
+ u32 baseaddr;
+};
+
+static const u32 pdp_supported_formats[] = {
+ DRM_FORMAT_BGRA8888_DIRECT_16x4,
+};
+#define NUM_SUPPORTED_FORMATS 1
+
+static const struct {
+ u32 drm_format;
+ u32 bytes_per_pixel;
+ u32 pixfmt_word;
+} pdp_format_table[] = {
+ /* 01000b / 8h 8-bit alpha + 24-bit rgb888 [RGBA] */
+ { DRM_FORMAT_BGRA8888_DIRECT_16x4, 4, 0x8 },
+ {},
+};
+
+static int pdp_mode_count(struct adf_pdp_device *pdp)
+{
+ int i = 0;
+
+ while (pdp_supported_modes[i].h_display)
+ i++;
+ return i;
+}
+
+static int pdp_mode_id(struct adf_pdp_device *pdp, u32 height, u32 width)
+{
+ int i;
+
+ for (i = 0; pdp_supported_modes[i].h_display; i++) {
+ const struct pdp_timing_data *tdata = &pdp_supported_modes[i];
+
+ if (tdata->h_display == width && tdata->v_display == height)
+ return i;
+ }
+ dev_err(&pdp->pdev->dev, "Failed to find matching mode for %dx%d\n",
+ width, height);
+ return -1;
+}
+
+static const struct pdp_timing_data *pdp_timing_data(
+ struct adf_pdp_device *pdp, int mode_id)
+{
+ if (mode_id >= pdp_mode_count(pdp) || mode_id < 0)
+ return NULL;
+ return &pdp_supported_modes[mode_id];
+}
+
+static void pdp_mode_to_drm_mode(struct adf_pdp_device *pdp, int mode_id,
+ struct drm_mode_modeinfo *drm_mode)
+{
+ const struct pdp_timing_data *pdp_mode;
+
+ pdp_mode = pdp_timing_data(pdp, mode_id);
+ BUG_ON(pdp_mode == NULL);
+
+ memset(drm_mode, 0, sizeof(*drm_mode));
+
+ drm_mode->hdisplay = pdp_mode->h_display;
+ drm_mode->vdisplay = pdp_mode->v_display;
+ drm_mode->vrefresh = pdp_mode->v_refresh;
+
+ adf_modeinfo_set_name(drm_mode);
+}
+
+static u32 pdp_read_reg(struct adf_pdp_device *pdp, resource_size_t reg_offset)
+{
+ BUG_ON(reg_offset > pdp->regs_size-4);
+ return ioread32(pdp->regs + reg_offset);
+}
+
+static void pdp_write_reg(struct adf_pdp_device *pdp,
+ resource_size_t reg_offset, u32 reg_value)
+{
+ BUG_ON(reg_offset > pdp->regs_size-4);
+ iowrite32(reg_value, pdp->regs + reg_offset);
+}
+
+static void pdp_write_fbdc_reg(struct adf_pdp_device *pdp,
+ resource_size_t reg_offset, u32 reg_value)
+{
+ BUG_ON(reg_offset > pdp->fbdc_regs_size-4);
+ iowrite32(reg_value, pdp->fbdc_regs + reg_offset);
+}
+
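+/*
+ * Minimal helpers for the i2c master in front of the ADV7511 HDMI
+ * transmitter: program the transaction (0x7a for a write, 0x7b for a read),
+ * start it and poll the status register until the controller goes idle.
+ */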
+#define I2C_TIMEOUT 10000
+
+static void pdp_write_i2c(struct adf_pdp_device *pdp, u32 reg_addr, u32 data)
+{
+ int i;
+
+ iowrite32(0x7a, pdp->i2c_regs + 0x04);
+ iowrite32(reg_addr, pdp->i2c_regs + 0x08);
+ iowrite32(data, pdp->i2c_regs + 0x0c);
+ iowrite32(0x1, pdp->i2c_regs + 0x14);
+
+ for (i = 0; i < I2C_TIMEOUT; i++) {
+ if (ioread32(pdp->i2c_regs + 0x18) == 0)
+ break;
+ }
+
+ if (i == I2C_TIMEOUT)
+ dev_err(&pdp->pdev->dev, "i2c write timeout\n");
+}
+
+static u32 pdp_read_i2c(struct adf_pdp_device *pdp, u32 reg_addr)
+{
+ int i;
+
+ iowrite32(0x7b, pdp->i2c_regs + 0x04);
+ iowrite32(reg_addr, pdp->i2c_regs + 0x08);
+ iowrite32(0x1, pdp->i2c_regs + 0x14);
+
+ for (i = 0; i < I2C_TIMEOUT; i++) {
+ if (ioread32(pdp->i2c_regs + 0x18) == 0)
+ break;
+ }
+
+ if (i == I2C_TIMEOUT) {
+ dev_err(&pdp->pdev->dev, "i2c read timeout\n");
+ return 0;
+ }
+ return ioread32(pdp->i2c_regs + 0x10);
+}
+
+static void pdp_devres_release(struct device *dev, void *res)
+{
+ /* No extra cleanup needed */
+}
+
+static u32 pdp_format_bpp(u32 drm_format)
+{
+ int i;
+
+ for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
+ if (pdp_format_table[i].drm_format == drm_format)
+ return pdp_format_table[i].bytes_per_pixel;
+ }
+ WARN(1, "Unsupported drm format");
+ return 0;
+}
+
+static u32 pdp_format(u32 drm_format)
+{
+ int i;
+
+ for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
+ if (pdp_format_table[i].drm_format == drm_format)
+ return pdp_format_table[i].pixfmt_word;
+ }
+ WARN(1, "Unsupported drm format");
+ return 0;
+}
+
+static void pdp_enable_scanout(struct adf_pdp_device *pdp, u32 base_addr)
+{
+ u32 reg_value;
+
+ /* Set the base address to the fbdc module */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS,
+ base_addr);
+ /* Turn on scanout */
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL);
+ reg_value &= ~(PVR5__GRPH1STREN_MASK);
+ reg_value |= 0x1 << PVR5__GRPH1STREN_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
+}
+
+static void pdp_disable_scanout(struct adf_pdp_device *pdp)
+{
+ u32 reg_value;
+
+ /* Turn off scanout */
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL);
+ reg_value &= ~(PVR5__GRPH1STREN_MASK);
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
+ /* Reset the base address in the fbdc module */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS,
+ 0);
+}
+
+static bool pdp_vsync_triggered(struct adf_pdp_device *pdp)
+{
+ return atomic_read(&pdp->vsync_triggered) == 1;
+}
+
+static void pdp_enable_ints(struct adf_pdp_device *pdp)
+{
+ int err = 0;
+ u32 reg_value;
+
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB);
+ reg_value &= ~(PVR5__INTEN_VBLNK0_MASK);
+ reg_value |= 0x1 << PVR5__INTEN_VBLNK0_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB, reg_value);
+
+ err = apollo_enable_interrupt(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_TC5_PDP);
+ if (err) {
+ dev_err(&pdp->pdev->dev,
+ "apollo_enable_interrupt failed (%d)\n", err);
+ }
+}
+
+static void pdp_disable_ints(struct adf_pdp_device *pdp)
+{
+ int err = 0;
+ u32 reg_value;
+
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB);
+ reg_value &= ~(PVR5__INTEN_VBLNK0_MASK);
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB, reg_value);
+
+ err = apollo_disable_interrupt(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_TC5_PDP);
+ if (err) {
+ dev_err(&pdp->pdev->dev,
+ "apollo_disable_interrupt failed (%d)\n", err);
+ }
+}
+
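+/*
+ * Queue a new scan-out configuration: point the FBDC interface at the new
+ * buffer, enable scan-out and wait for the next vblank before signalling
+ * completion.
+ */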
+static void pdp_post(struct adf_device *adf_dev, struct adf_post *cfg,
+ void *driver_state)
+{
+ int num_validates_snapshot = *(int *)driver_state;
+ unsigned long flags;
+
+ /* Set vsync wait timeout to 4x expected vsync */
+ struct adf_pdp_device *pdp = devres_find(adf_dev->dev,
+ pdp_devres_release, NULL, NULL);
+ long timeout =
+ msecs_to_jiffies((1000 / pdp->current_timings->v_refresh) * 4);
+
+ /* Null-flip handling, used to push buffers off screen during an error
+ * state to stop them blocking subsequent rendering */
+ if (cfg->n_bufs == 0) {
+ pdp_disable_scanout(pdp);
+ return;
+ }
+
+ /* We don't support changing the configuration on the fly */
+ if (pdp->current_timings->h_display != cfg->bufs[0].w ||
+ pdp->current_timings->v_display != cfg->bufs[0].h ||
+ pdp->current_drm_format != cfg->bufs[0].format) {
+ dev_err(&pdp->pdev->dev, "Unsupported configuration on post\n");
+ return;
+ }
+
+ WARN_ON(cfg->n_bufs != 1);
+ WARN_ON(cfg->mappings->sg_tables[0]->nents != 1);
+
+ spin_lock_irqsave(&gFlipLock, flags);
+
+ debugfs_dma_buf_set(cfg->bufs[0].dma_bufs[0]);
+
+ /* Set surface address and enable the scanouts */
+ pdp_enable_scanout(pdp, sg_phys(cfg->mappings->sg_tables[0]->sgl) -
+ pdp->pdata->memory_base);
+
+ atomic_set(&pdp->vsync_triggered, 0);
+
+ spin_unlock_irqrestore(&gFlipLock, flags);
+
+ /* Wait until the buffer is on-screen, so we know the previous buffer
+ * has been retired and off-screen.
+ *
+ * If vsync was already off when this post was serviced, we need to
+ * enable the vsync again briefly so the register updates we shadowed
+ * above get applied and we don't signal the fence prematurely. One
+ * vsync afterwards, we'll disable the vsync again.
+ */
+ if (!atomic_xchg(&pdp->vsync_state, 1))
+ pdp_enable_ints(pdp);
+
+ if (wait_event_timeout(pdp->vsync_wait_queue,
+ pdp_vsync_triggered(pdp), timeout) == 0) {
+ dev_err(&pdp->pdev->dev, "Post VSync wait timeout\n");
+ /* Undefined behaviour if this times out */
+ }
+
+ pdp->num_posts = num_validates_snapshot;
+}
+
+static bool pdp_supports_event(struct adf_obj *obj, enum adf_event_type type)
+{
+ switch (obj->type) {
+ case ADF_OBJ_INTERFACE:
+ {
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+}
+
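+/*
+ * Vblank interrupt handler: notifies ADF of the vsync, wakes any post
+ * waiting in pdp_post and handles the deferred vsync disable used when the
+ * display is idle.
+ */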
+static void pdp_irq_handler(void *data)
+{
+ struct adf_pdp_device *pdp = data;
+ unsigned long flags;
+ u32 int_status;
+
+ int_status = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTSTAT);
+
+ spin_lock_irqsave(&gFlipLock, flags);
+
+ /* If we're idle, and a vsync disable was requested, do it now.
+ * This code assumes that the HWC will always re-enable vsync
+ * explicitly before posting new configurations.
+ */
+ if (atomic_read(&pdp->num_validates) == pdp->num_posts) {
+ if (!atomic_read(&pdp->requested_vsync_state)) {
+ pdp_disable_ints(pdp);
+ atomic_set(&pdp->vsync_state, 0);
+ }
+ }
+
+ if ((int_status & PVR5__INTS_VBLNK0_MASK)) {
+ /* Notify the framework of the just occurred vblank */
+ adf_vsync_notify(&pdp->adf_interface, ktime_get());
+ atomic_set(&pdp->vsync_triggered, 1);
+ wake_up(&pdp->vsync_wait_queue);
+ }
+
+ spin_unlock_irqrestore(&gFlipLock, flags);
+}
+
+static void pdp_set_event(struct adf_obj *obj, enum adf_event_type type,
+ bool enabled)
+{
+ struct adf_pdp_device *pdp;
+ bool old;
+
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ {
+ pdp = devres_find(obj->parent->dev, pdp_devres_release,
+ NULL, NULL);
+ atomic_set(&pdp->requested_vsync_state, enabled);
+ if (enabled) {
+ old = atomic_xchg(&pdp->vsync_state, enabled);
+ if (!old)
+ pdp_enable_ints(pdp);
+ }
+ break;
+ }
+ default:
+ BUG();
+ }
+}
+
+static int pdp_unblank_hdmi(struct adf_pdp_device *pdp)
+{
+ int err = 0, i;
+ u32 reg_value;
+
+ /* The ADV7511 sometimes doesn't come up immediately after power up,
+ * so issue the power-on write multiple times.
+ */
+ for (i = 0; i < 6; i++) {
+ pdp_write_i2c(pdp, 0x41, 0x10);
+ msleep(500);
+ }
+ msleep(1000);
+ reg_value = pdp_read_i2c(pdp, 0x41);
+ if (reg_value == 0x10) {
+ dev_err(&pdp->pdev->dev, "i2c: ADV7511 powered up\n");
+ } else {
+ dev_err(&pdp->pdev->dev, "i2c: Failed to power up ADV7511\n");
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static void pdp_blank_hdmi(struct adf_pdp_device *pdp)
+{
+ pdp_write_i2c(pdp, 0x41, 0x50);
+}
+
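+/*
+ * Bring up the ADV7511 HDMI transmitter: verify it responds over i2c, wait
+ * for hot plug / monitor sense, power it up and program the fixed and video
+ * format registers for 720p output.
+ */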
+static void pdp_enable_hdmi(struct adf_pdp_device *pdp)
+{
+ u32 reg_value = 0;
+ int i;
+
+ /* Set the SCL clock, assuming the i2c_master clock runs at 50 MHz */
+ iowrite32(0x18, pdp->i2c_regs);
+
+ reg_value = pdp_read_i2c(pdp, 0xf5);
+ if (reg_value != 0x75) {
+ dev_err(&pdp->pdev->dev, "i2c: 1st register read failed: %x\n",
+ reg_value);
+ goto err_out;
+ }
+
+ reg_value = pdp_read_i2c(pdp, 0xf6);
+ if (reg_value != 0x11) {
+ dev_err(&pdp->pdev->dev, "i2c: 2nd register read failed: %x\n",
+ reg_value);
+ goto err_out;
+ }
+
+ /* Check the HPD and Monitor Sense */
+ for (i = 0; i < 50; i++) {
+ reg_value = pdp_read_i2c(pdp, 0x42);
+ if (reg_value == 0x70) {
+ dev_err(&pdp->pdev->dev, "i2c: Hot Plug and Monitor Sense detected ...\n");
+ break;
+ } else if (reg_value == 0x50) {
+ dev_err(&pdp->pdev->dev, "i2c: Only Hot Plug detected ...\n");
+ } else if (reg_value == 0x03) {
+ dev_err(&pdp->pdev->dev, "i2c: Only Monitor Sense detected ...\n");
+ }
+ }
+
+ if (pdp_unblank_hdmi(pdp))
+ goto err_out;
+
+ /* Writing the fixed registers */
+ pdp_write_i2c(pdp, 0x98, 0x03);
+ pdp_write_i2c(pdp, 0x9a, 0xe0);
+ pdp_write_i2c(pdp, 0x9c, 0x30);
+ pdp_write_i2c(pdp, 0x9d, 0x61);
+ pdp_write_i2c(pdp, 0xa2, 0xa4);
+ pdp_write_i2c(pdp, 0xa3, 0xa4);
+ pdp_write_i2c(pdp, 0xe0, 0xd0);
+ pdp_write_i2c(pdp, 0xf9, 0x00);
+
+ /* Starting video input */
+ /* Disable I2S */
+ pdp_write_i2c(pdp, 0x0c, 0x80);
+
+ /* Select input video format */
+ pdp_write_i2c(pdp, 0x15, 0x10);
+
+ /* Select Colour Depth and output format */
+ pdp_write_i2c(pdp, 0x16, 0x30);
+
+ /* Select Aspect Ratio */
+ pdp_write_i2c(pdp, 0x17, 0x02);
+
+ /* Other settings */
+ pdp_write_i2c(pdp, 0x48, 0x00);
+ pdp_write_i2c(pdp, 0x55, 0x12);
+
+ /* Select Picture Aspect Ratio */
+ pdp_write_i2c(pdp, 0x56, 0x28);
+
+ /* GC enable */
+ pdp_write_i2c(pdp, 0x40, 0x80);
+
+ /* 24 bits/pixel */
+ pdp_write_i2c(pdp, 0x4c, 0x04);
+
+ /* Select HDMI Mode */
+ pdp_write_i2c(pdp, 0xaf, 0x16);
+
+ /* Set VIC to Receiver */
+ pdp_write_i2c(pdp, 0x3d, 0x04);
+
+ for (i = 0; i < 50; i++) {
+ reg_value = pdp_read_i2c(pdp, 0x3e);
+ if (reg_value == 0x10) {
+ dev_err(&pdp->pdev->dev, "i2c: VIC detected as 720P, 60 Hz, 16:9...\n");
+ break;
+ }
+ }
+
+ if (i == 50)
+ dev_err(&pdp->pdev->dev, "i2c: Desired VIC not detected\n");
+
+ /* Write to PD register again */
+ pdp_write_i2c(pdp, 0x41, 0x10);
+
+err_out:
+ return;
+}
+
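+/*
+ * Program the graphics plane, sync timings and the FBDC decompression
+ * interface for the requested mode, then re-enable sync generation and
+ * bring up the HDMI transmitter.
+ */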
+static int pdp_modeset(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ const struct pdp_timing_data *tdata;
+ struct adf_pdp_device *pdp;
+ int mode_id, err = 0;
+ u32 reg_value = 0;
+
+ pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
+ NULL, NULL);
+ mode_id = pdp_mode_id(pdp, mode->vdisplay, mode->hdisplay);
+ tdata = pdp_timing_data(pdp, mode_id);
+
+ if (!tdata) {
+ dev_err(&pdp->pdev->dev, "Failed to find mode for %ux%u\n",
+ mode->hdisplay, mode->vdisplay);
+ err = -ENXIO;
+ goto err_out;
+ }
+
+ /* Make sure all the following register writes are applied instantly */
+ reg_value = 0x1 << PVR5__BYPASS_DOUBLE_BUFFERING_SHIFT;
+ reg_value |= 0x1 << PVR5__REGISTERS_VALID_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_REGISTER_UPDATE_CTRL, reg_value);
+
+ /* Power down mode */
+ reg_value = 0x1 << PVR5__POWERDN_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
+
+ /* Background color (green) */
+ reg_value = 0x0099FF66;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_BGNDCOL, reg_value);
+
+ /* Set alpha blend mode to global alpha blending (10b / 2h) and
+ * disable everything else.
+ */
+ reg_value = 0x2 << PVR5__GRPH1BLEND_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
+
+ /* Global alpha */
+ reg_value = 0xff << PVR5__GRPH1GALPHA_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1BLND, reg_value);
+
+ /* Reset the base address of the non-FBDC path. This is not used. */
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1_BASEADDR, 0);
+
+ /* Graphics video pixel format:
+ * 01000b / 8h 8-bit alpha + 24-bit rgb888 [RGBA].
+ */
+ pdp->current_drm_format = DRM_FORMAT_BGRA8888_DIRECT_16x4;
+ reg_value = pdp_format(pdp->current_drm_format)
+ << PVR5__GRPH1PIXFMT_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1SURF, reg_value);
+
+ /* Reset position of the plane */
+ reg_value = 0 << PVR5__GRPH1XSTART_SHIFT;
+ reg_value |= 0 << PVR5__GRPH1YSTART_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1POSN, reg_value);
+
+ /* Stride of surface in 16byte words - 1 */
+ reg_value = (tdata->h_display * 4 / 16 - 1) << PVR5__GRPH1STRIDE_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1STRIDE, reg_value);
+
+ /* Size:
+ * Width of surface in pixels - 1
+ * Height of surface in lines - 1 */
+ reg_value = (tdata->h_display - 1) << PVR5__GRPH1WIDTH_SHIFT;
+ reg_value |= (tdata->v_display - 1) << PVR5__GRPH1HEIGHT_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1SIZE, reg_value);
+
+ /* H-time */
+ reg_value = tdata->h_back_porch << PVR5__HBPS_SHIFT;
+ reg_value |= tdata->h_total << PVR5__HT_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC1, reg_value);
+ reg_value = tdata->h_active_start << PVR5__HAS_SHIFT;
+ reg_value |= tdata->h_left_border << PVR5__HLBS_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC2, reg_value);
+ reg_value = tdata->h_front_porch << PVR5__HFPS_SHIFT;
+ reg_value |= tdata->h_right_border << PVR5__HRBS_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC3, reg_value);
+
+ /* V-time */
+ reg_value = tdata->v_back_porch << PVR5__VBPS_SHIFT;
+ reg_value |= tdata->v_total << PVR5__VT_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC1, reg_value);
+ reg_value = tdata->v_active_start << PVR5__VAS_SHIFT;
+ reg_value |= tdata->v_top_border << PVR5__VTBS_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC2, reg_value);
+ reg_value = tdata->v_front_porch << PVR5__VFPS_SHIFT;
+ reg_value |= tdata->v_bottom_border << PVR5__VBBS_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC3, reg_value);
+
+ /* Horizontal data enable */
+ reg_value = tdata->h_left_border << PVR5__HDES_SHIFT;
+ reg_value |= tdata->h_front_porch << PVR5__HDEF_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HDECTRL, reg_value);
+
+ /* Vertical data enable */
+ reg_value = tdata->v_top_border << PVR5__VDES_SHIFT;
+ reg_value |= tdata->v_front_porch << PVR5__VDEF_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VDECTRL, reg_value);
+
+ /* Vertical event start and vertical fetch start */
+ reg_value = tdata->v_back_porch << PVR5__VFETCH_SHIFT;
+ reg_value |= tdata->v_bottom_border << PVR5__VEVENT_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VEVENT, reg_value);
+
+ /* Now enable the fbdc module (direct_16x4) */
+ /* Set the number of tiles per plane */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_NUM_TILES,
+ (tdata->h_display * tdata->v_display) / (16 * 4));
+ /* Set the number of tiles per line */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_PER_LINE,
+ tdata->h_display / 16);
+ /* Set the color format */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_PIXEL_FORMAT, 0xc);
+ /* Reset base address */
+ pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS, 0x0);
+ /* Set invalidate request */
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
+ if ((reg_value & PVR5__VSPOL_MASK) >> PVR5__VSPOL_SHIFT == 0x1) {
+ pdp_write_fbdc_reg(pdp,
+ PVR5__PDP_FBDC_INTRFC_INVALIDATE_REQUEST, 0x1);
+ } else {
+ pdp_write_fbdc_reg(pdp,
+ PVR5__PDP_FBDC_INTRFC_INVALIDATE_REQUEST, 0x0);
+ }
+
+ /* Enable vsync again */
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
+ reg_value &= ~(PVR5__SYNCACTIVE_MASK);
+ reg_value |= 0x1 << PVR5__SYNCACTIVE_SHIFT;
+ reg_value &= ~(PVR5__BLNKPOL_MASK);
+ reg_value |= 0x1 << PVR5__BLNKPOL_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
+
+ /* Update control */
+ reg_value = 0x1 << PVR5__USE_VBLANK_SHIFT;
+ reg_value |= 0x1 << PVR5__REGISTERS_VALID_SHIFT;
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_REGISTER_UPDATE_CTRL, reg_value);
+
+ intf->current_mode = *mode;
+ pdp->current_timings = tdata;
+
+ pdp_enable_hdmi(pdp);
+
+err_out:
+ return err;
+}
+
+static int pdp_blank(struct adf_interface *intf,
+ u8 state)
+{
+ struct adf_pdp_device *pdp;
+ u32 reg_value;
+
+ pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
+ NULL, NULL);
+
+ if (state != DRM_MODE_DPMS_OFF && state != DRM_MODE_DPMS_ON)
+ return -EINVAL;
+
+ reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
+ switch (state) {
+ case DRM_MODE_DPMS_OFF:
+ reg_value &= ~(PVR5__POWERDN_MASK);
+ reg_value |= 0x1 << PVR5__POWERDN_SHIFT;
+/* pdp_blank_hdmi(pdp);*/
+ break;
+ case DRM_MODE_DPMS_ON:
+ reg_value &= ~(PVR5__POWERDN_MASK);
+/* pdp_unblank_hdmi(pdp);*/
+ break;
+ }
+ pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
+
+ return 0;
+}
+
+static int pdp_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
+ u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+ u32 size = w * h * pdp_format_bpp(format);
+ struct adf_pdp_device *pdp;
+ struct ion_handle *hdl;
+ int err = 0;
+
+ pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
+ NULL, NULL);
+ hdl = ion_alloc(pdp->ion_client, size, 0,
+ (1 << pdp->pdata->ion_heap_id), 0);
+ if (IS_ERR(hdl)) {
+ err = PTR_ERR(hdl);
+ dev_err(&pdp->pdev->dev, "ion_alloc failed (%d)\n", err);
+ goto err_out;
+ }
+ *dma_buf = ion_share_dma_buf(pdp->ion_client, hdl);
+ if (IS_ERR(*dma_buf)) {
+ err = PTR_ERR(*dma_buf);
+ dev_err(&pdp->pdev->dev,
+ "ion_share_dma_buf failed (%d)\n", err);
+ goto err_free_buffer;
+ }
+ *pitch = w * pdp_format_bpp(format);
+ *offset = 0;
+err_free_buffer:
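+ /* On success the exported dma_buf keeps its own reference to the
+ * underlying allocation, so the local ion handle can be dropped here
+ * in all cases.
+ */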
+ ion_free(pdp->ion_client, hdl);
+err_out:
+ return err;
+}
+
+static int pdp_describe_simple_post(struct adf_interface *intf,
+ struct adf_buffer *fb, void *data, size_t *size)
+{
+ struct adf_post_ext *post_ext = data;
+ static int post_id;
+
+ struct drm_clip_rect full_screen = {
+ .x2 = ADF_PDP_WIDTH,
+ .y2 = ADF_PDP_HEIGHT,
+ };
+
+ /* NOTE: an upstream ADF bug means we can't test *size instead */
+ BUG_ON(ADF_MAX_CUSTOM_DATA_SIZE < sizeof(struct adf_post_ext) +
+ 1 * sizeof(struct adf_buffer_config_ext));
+
+ *size = sizeof(struct adf_post_ext) +
+ 1 * sizeof(struct adf_buffer_config_ext);
+
+ post_ext->post_id = ++post_id;
+
+ post_ext->bufs_ext[0].crop = full_screen;
+ post_ext->bufs_ext[0].display = full_screen;
+ post_ext->bufs_ext[0].transform = ADF_BUFFER_TRANSFORM_NONE_EXT;
+ post_ext->bufs_ext[0].blend_type = ADF_BUFFER_BLENDING_PREMULT_EXT;
+ post_ext->bufs_ext[0].plane_alpha = 0xff;
+
+ return 0;
+}
+
+static int
+adf_pdp_open(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_device *dev = (struct adf_device *)obj->parent;
+ struct adf_pdp_device *pdp;
+
+ pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
+
+ atomic_inc(&pdp->refcount);
+ return 0;
+}
+
+static void
+adf_pdp_release(struct adf_obj *obj, struct inode *inode, struct file *file)
+{
+ struct adf_device *dev = (struct adf_device *)obj->parent;
+ struct sync_fence *release_fence;
+ struct adf_pdp_device *pdp;
+
+ pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
+
+ if (atomic_dec_return(&pdp->refcount))
+ return;
+
+ /* Make sure we have no outstanding posts waiting */
+ atomic_set(&pdp->vsync_triggered, 1);
+ wake_up_all(&pdp->vsync_wait_queue);
+ /* This special "null" flip works around a problem with ADF
+ * which leaves buffers pinned by the display engine even
+ * after all ADF clients have closed.
+ *
+ * The "null" flip is pipelined like any other. The user won't
+ * be able to unload this module until it has been posted.
+ */
+ release_fence = adf_device_post(dev, NULL, 0, NULL, 0, NULL, 0);
+ if (IS_ERR_OR_NULL(release_fence)) {
+ dev_err(dev->dev,
+ "Failed to queue null flip command (err=%d).\n",
+ (int)PTR_ERR(release_fence));
+ return;
+ }
+
+ sync_fence_put(release_fence);
+}
+
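+/*
+ * Only the FBDC compressed BGRA8888 format in pdp_format_table is accepted
+ * as a custom format.
+ */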
+static int adf_img_validate_custom_format(struct adf_device *dev,
+ struct adf_buffer *buf)
+{
+ int i;
+
+ for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
+ if (pdp_format_table[i].drm_format == buf->format)
+ return 1;
+ }
+ return 0;
+}
+
+static int pdp_validate(struct adf_device *dev, struct adf_post *cfg,
+ void **driver_state)
+{
+ struct adf_pdp_device *pdp;
+ int err;
+
+ pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
+
+ err = adf_img_validate_simple(dev, cfg, driver_state);
+ if (err == 0 && cfg->mappings) {
+ /* We store a snapshot of num_validates in driver_state at the
+ * time validate was called, which will be passed to the post
+ * function. This snapshot is copied into (i.e. overwrites)
+ * num_posts, rather then simply incrementing num_posts, to
+ * handle cases e.g. during fence timeouts where validates
+ * are called without corresponding posts.
+ */
+ int *validates = kmalloc(sizeof(*validates), GFP_KERNEL);
+
+ if (!validates)
+ return -ENOMEM;
+ *validates = atomic_inc_return(&pdp->num_validates);
+ *driver_state = validates;
+ } else {
+ *driver_state = NULL;
+ }
+ return err;
+}
+
+static void pdp_state_free(struct adf_device *dev, void *driver_state)
+{
+ kfree(driver_state);
+}
+
+static struct adf_device_ops adf_pdp_device_ops = {
+ .owner = THIS_MODULE,
+ .base = {
+ .open = adf_pdp_open,
+ .release = adf_pdp_release,
+ .ioctl = adf_img_ioctl,
+ },
+ .validate_custom_format = adf_img_validate_custom_format,
+ .validate = pdp_validate,
+ .post = pdp_post,
+ .state_free = pdp_state_free,
+};
+
+static struct adf_interface_ops adf_pdp_interface_ops = {
+ .base = {
+ .supports_event = pdp_supports_event,
+ .set_event = pdp_set_event,
+ },
+ .modeset = pdp_modeset,
+ .blank = pdp_blank,
+ .alloc_simple_buffer = pdp_alloc_simple_buffer,
+ .describe_simple_post = pdp_describe_simple_post,
+};
+
+static struct adf_overlay_engine_ops adf_pdp_overlay_ops = {
+ .supported_formats = &pdp_supported_formats[0],
+ .n_supported_formats = NUM_SUPPORTED_FORMATS,
+};
+
+static int adf_pdp_probe_device(struct platform_device *pdev)
+{
+ struct apollo_pdp_platform_data *pdata = pdev->dev.platform_data;
+ struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
+ int err = 0, i, default_mode_id;
+ struct adf_pdp_device *pdp;
+ struct resource *registers;
+ u32 core_id, core_rev;
+
+ pdp = devres_alloc(pdp_devres_release, sizeof(*pdp), GFP_KERNEL);
+ if (!pdp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ devres_add(&pdev->dev, pdp);
+
+ pdp->pdata = pdata;
+ pdp->pdev = pdev;
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable PDP pci device (%d)\n", err);
+ goto err_out;
+ }
+
+ atomic_set(&pdp->refcount, 0);
+ atomic_set(&pdp->num_validates, 0);
+ pdp->num_posts = 0;
+
+ pdp->ion_client = ion_client_create(pdata->ion_device, "adf_pdp");
+ if (IS_ERR(pdp->ion_client)) {
+ err = PTR_ERR(pdp->ion_client);
+ dev_err(&pdev->dev,
+ "Failed to create PDP ION client (%d)\n", err);
+ goto err_disable_pci;
+ }
+
+ registers = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "tc5-pdp2-regs");
+ pdp->regs = devm_ioremap_resource(&pdev->dev, registers);
+ if (IS_ERR(pdp->regs)) {
+ err = PTR_ERR(pdp->regs);
+ dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
+ goto err_destroy_ion_client;
+ }
+ pdp->regs_size = resource_size(registers);
+
+ registers = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "tc5-pdp2-fbdc-regs");
+ pdp->fbdc_regs = devm_ioremap_resource(&pdev->dev, registers);
+ if (IS_ERR(pdp->fbdc_regs)) {
+ err = PTR_ERR(pdp->fbdc_regs);
+ dev_err(&pdev->dev, "Failed to map PDP fbdc registers (%d)\n",
+ err);
+ goto err_destroy_ion_client;
+ }
+ pdp->fbdc_regs_size = resource_size(registers);
+
+ registers = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "tc5-adv5711-regs");
+ pdp->i2c_regs = devm_ioremap_resource(&pdev->dev, registers);
+ if (IS_ERR(pdp->i2c_regs)) {
+ err = PTR_ERR(pdp->i2c_regs);
+ dev_err(&pdev->dev, "Failed to map ADV5711 i2c registers (%d)\n",
+ err);
+ goto err_destroy_ion_client;
+ }
+ pdp->i2c_regs_size = resource_size(registers);
+
+ core_id = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_CORE_ID);
+ core_rev = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_CORE_REV);
+
+ dev_err(&pdev->dev, "pdp2 core id/rev: %d.%d.%d/%d.%d.%d\n",
+ (core_id & PVR5__GROUP_ID_MASK) >> PVR5__GROUP_ID_SHIFT,
+ (core_id & PVR5__CORE_ID_MASK) >> PVR5__CORE_ID_SHIFT,
+ (core_id & PVR5__CONFIG_ID_MASK) >> PVR5__CONFIG_ID_SHIFT,
+ (core_rev & PVR5__MAJOR_REV_MASK) >> PVR5__MAJOR_REV_SHIFT,
+ (core_rev & PVR5__MINOR_REV_MASK) >> PVR5__MINOR_REV_SHIFT,
+ (core_rev & PVR5__MAINT_REV_MASK) >> PVR5__MAINT_REV_SHIFT);
+
+ err = adf_device_init(&pdp->adf_device, &pdp->pdev->dev,
+ &adf_pdp_device_ops, "pdp_device");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF device (%d)\n", err);
+ goto err_destroy_ion_client;
+ }
+
+ err = adf_interface_init(&pdp->adf_interface, &pdp->adf_device,
+ ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY, &adf_pdp_interface_ops,
+ "pdp_interface");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF interface (%d)\n", err);
+ goto err_destroy_adf_device;
+ }
+
+ err = adf_overlay_engine_init(&pdp->adf_overlay, &pdp->adf_device,
+ &adf_pdp_overlay_ops, "pdp_overlay");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ADF overlay (%d)\n", err);
+ goto err_destroy_adf_interface;
+ }
+
+ err = adf_attachment_allow(&pdp->adf_device, &pdp->adf_overlay,
+ &pdp->adf_interface);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to attach overlay (%d)\n", err);
+ goto err_destroy_adf_overlay;
+ }
+
+ pdp->num_supported_modes = pdp_mode_count(pdp);
+ pdp->supported_modes = kcalloc(pdp->num_supported_modes,
+ sizeof(*pdp->supported_modes), GFP_KERNEL);
+
+ if (!pdp->supported_modes) {
+ dev_err(&pdev->dev, "Failed to allocate supported modeinfo structs\n");
+ err = -ENOMEM;
+ goto err_destroy_adf_overlay;
+ }
+
+ for (i = 0; i < pdp->num_supported_modes; i++)
+ pdp_mode_to_drm_mode(pdp, i, &pdp->supported_modes[i]);
+
+ default_mode_id = pdp_mode_id(pdp, pdp_display_height,
+ pdp_display_width);
+ if (default_mode_id == -1) {
+ default_mode_id = 0;
+ dev_err(&pdev->dev, "No modeline found for requested display size (%dx%d)\n",
+ pdp_display_width, pdp_display_height);
+ }
+
+ /* Initial modeset... */
+ err = pdp_modeset(&pdp->adf_interface,
+ &pdp->supported_modes[default_mode_id]);
+ if (err) {
+ dev_err(&pdev->dev, "Initial modeset failed (%d)\n", err);
+ goto err_destroy_modelist;
+ }
+
+ err = adf_hotplug_notify_connected(&pdp->adf_interface,
+ pdp->supported_modes, pdp->num_supported_modes);
+ if (err) {
+ dev_err(&pdev->dev, "Initial hotplug notify failed (%d)\n",
+ err);
+ goto err_destroy_modelist;
+ }
+ err = apollo_set_interrupt_handler(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_TC5_PDP,
+ pdp_irq_handler, pdp);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set interrupt handler (%d)\n",
+ err);
+ goto err_destroy_modelist;
+ }
+
+ init_waitqueue_head(&pdp->vsync_wait_queue);
+ atomic_set(&pdp->requested_vsync_state, 0);
+ atomic_set(&pdp->vsync_state, 0);
+
+ if (debugfs_dma_buf_init("pdp_raw"))
+ dev_err(&pdev->dev, "Failed to create debug fs file for raw access\n");
+
+ return err;
+err_destroy_modelist:
+ kfree(pdp->supported_modes);
+err_destroy_adf_overlay:
+ adf_overlay_engine_destroy(&pdp->adf_overlay);
+err_destroy_adf_interface:
+ adf_interface_destroy(&pdp->adf_interface);
+err_destroy_adf_device:
+ adf_device_destroy(&pdp->adf_device);
+err_destroy_ion_client:
+ ion_client_destroy(pdp->ion_client);
+err_disable_pci:
+ pci_disable_device(pci_dev);
+err_out:
+ dev_err(&pdev->dev, "Failed to initialise PDP device\n");
+ return err;
+}
+
+static int adf_pdp_remove_device(struct platform_device *pdev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
+ struct adf_pdp_device *pdp;
+ int err = 0;
+
+ pdp = devres_find(&pdev->dev, pdp_devres_release, NULL, NULL);
+
+ debugfs_dma_buf_deinit();
+
+ /* Disable scanout */
+ pdp_disable_scanout(pdp);
+ pdp_disable_ints(pdp);
+ apollo_set_interrupt_handler(pdp->pdev->dev.parent,
+ APOLLO_INTERRUPT_TC5_PDP,
+ NULL, NULL);
+ /* Disable hdmi */
+ pdp_blank_hdmi(pdp);
+ kfree(pdp->supported_modes);
+ adf_overlay_engine_destroy(&pdp->adf_overlay);
+ adf_interface_destroy(&pdp->adf_interface);
+ adf_device_destroy(&pdp->adf_device);
+ ion_client_destroy(pdp->ion_client);
+ pci_disable_device(pci_dev);
+ return err;
+}
+
+static void adf_pdp_shutdown_device(struct platform_device *pdev)
+{
+ /* No cleanup needed, all done in remove_device */
+}
+
+static struct platform_device_id pdp_platform_device_id_table[] = {
+ { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 },
+ { },
+};
+
+static struct platform_driver pdp_platform_driver = {
+ .probe = adf_pdp_probe_device,
+ .remove = adf_pdp_remove_device,
+ .shutdown = adf_pdp_shutdown_device,
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pdp_platform_device_id_table,
+};
+
+static int __init adf_pdp_init(void)
+{
+ return platform_driver_register(&pdp_platform_driver);
+}
+
+static void __exit adf_pdp_exit(void)
+{
+ platform_driver_unregister(&pdp_platform_driver);
+}
+
+module_init(adf_pdp_init);
+module_exit(adf_pdp_exit);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File apollo.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the apollo testchip framework. It creates
+ * platform devices for the pdp and ext sub-devices, and exports functions
+ * to manage the shared interrupt handling
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/thermal.h>
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+#define FAKE_INTERRUPT_TIME_MS 16
+#include <linux/timer.h>
+#include <linux/time.h>
+#endif
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "apollo_drv.h"
+
+#include "apollo_regs.h"
+#include "tcf_clk_ctrl.h"
+#include "tcf_pll.h"
+
+/* Odin (3rd gen TCF FPGA) */
+#include "odin_defs.h"
+#include "odin_regs.h"
+#include "bonnie_tcf.h"
+
+#include "pvrmodule.h"
+
+#if defined(SUPPORT_ION)
+#if defined(SUPPORT_RGX)
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+#define APOLLO_ION_HEAP_COUNT 4
+#else
+#define APOLLO_ION_HEAP_COUNT 3
+#endif
+#else
+#define APOLLO_ION_HEAP_COUNT 2
+#endif
+#include "ion_lma_heap.h"
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA) || defined(SUPPORT_RGX)
+#include <linux/debugfs.h>
+#endif
+
+#define DRV_NAME "apollo"
+
+/* Convert a byte offset to a 32 bit dword offset */
+#define DWORD_OFFSET(byte_offset) ((byte_offset)>>2)
+
+/* How much memory to give to the PDP heap (used for pdp buffers). */
+#define APOLLO_PDP_MEM_SIZE ((TC_DISPLAY_MEM_SIZE)*1024*1024)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+/* How much memory to give to the secure heap. */
+#define APOLLO_SECURE_MEM_SIZE ((TC_SECURE_MEM_SIZE)*1024*1024)
+#endif
+
+/* This is a guess at the minimum sensible size for the ext heap.
+ * It is only used to warn if the ext heap is smaller, and does
+ * not affect the functional logic in any way.
+ */
+#define APOLLO_EXT_MINIMUM_MEM_SIZE (10*1024*1024)
+
+#define PCI_VENDOR_ID_POWERVR 0x1010
+#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1
+#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2
+
+#define APOLLO_MEM_PCI_BASENUM (2)
+
+#define APOLLO_INTERRUPT_FLAG_PDP (1 << PDP1_INT_SHIFT)
+#define APOLLO_INTERRUPT_FLAG_EXT (1 << EXT_INT_SHIFT)
+
+MODULE_DESCRIPTION("APOLLO testchip framework driver");
+
+static int apollo_core_clock = RGX_TC_CORE_CLOCK_SPEED;
+static int apollo_mem_clock = RGX_TC_MEM_CLOCK_SPEED;
+
+module_param(apollo_core_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(apollo_core_clock, "Apollo core clock speed");
+module_param(apollo_mem_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(apollo_mem_clock, "Apollo memory clock speed");
+
+static unsigned long apollo_pdp_mem_size = APOLLO_PDP_MEM_SIZE;
+
+module_param(apollo_pdp_mem_size, ulong, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(apollo_pdp_mem_size,
+ "Apollo PDP reserved memory size in bytes");
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+static unsigned long apollo_secure_mem_size = APOLLO_SECURE_MEM_SIZE;
+
+module_param(apollo_secure_mem_size, ulong, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(apollo_secure_mem_size,
+ "Apollo secure reserved memory size in bytes");
+#endif
+
+static int apollo_sys_clock = RGX_TC_SYS_CLOCK_SPEED;
+module_param(apollo_sys_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(apollo_sys_clock, "Apollo system clock speed (TCF5 only)");
+
+enum apollo_version_t {
+ APOLLO_VERSION_TCF_2 = 0,
+ APOLLO_VERSION_TCF_5,
+ APOLLO_VERSION_TCF_BONNIE,
+ ODIN_VERSION_TCF_BONNIE
+};
+
+#if defined(SUPPORT_RGX)
+
+static struct debugfs_blob_wrapper apollo_debugfs_rogue_name_blobs[] = {
+ [APOLLO_VERSION_TCF_2] = {
+ .data = "hood", /* probably */
+ .size = sizeof("hood") - 1,
+ },
+ [APOLLO_VERSION_TCF_5] = {
+ .data = "fpga (unknown)",
+ .size = sizeof("fpga (unknown)") - 1,
+ },
+ [APOLLO_VERSION_TCF_BONNIE] = {
+ .data = "bonnie",
+ .size = sizeof("bonnie") - 1,
+ },
+ [ODIN_VERSION_TCF_BONNIE] = {
+ .data = "bonnie",
+ .size = sizeof("bonnie") - 1,
+ },
+};
+
+#endif /* defined(SUPPORT_RGX) */
+
+struct apollo_interrupt_handler {
+ bool enabled;
+ void (*handler_function)(void *);
+ void *handler_data;
+};
+
+struct apollo_region {
+ resource_size_t base;
+ resource_size_t size;
+};
+
+struct apollo_io_region {
+ struct apollo_region region;
+ void __iomem *registers;
+};
+
+struct apollo_device {
+ struct pci_dev *pdev;
+
+ struct apollo_io_region tcf;
+ struct apollo_io_region tcf_pll;
+
+ spinlock_t interrupt_handler_lock;
+ spinlock_t interrupt_enable_lock;
+
+ struct apollo_interrupt_handler
+ interrupt_handlers[APOLLO_INTERRUPT_COUNT];
+
+ struct apollo_region apollo_mem;
+
+ struct platform_device *pdp_dev;
+
+ resource_size_t pdp_heap_mem_base;
+ resource_size_t pdp_heap_mem_size;
+
+ struct platform_device *ext_dev;
+
+ resource_size_t ext_heap_mem_base;
+ resource_size_t ext_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ resource_size_t secure_heap_mem_base;
+ resource_size_t secure_heap_mem_size;
+#endif
+
+ enum apollo_version_t version;
+
+ struct thermal_zone_device *thermal_zone;
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ struct timer_list timer;
+#endif
+
+#if defined(SUPPORT_ION)
+ struct ion_device *ion_device;
+ struct ion_heap *ion_heaps[APOLLO_ION_HEAP_COUNT];
+ int ion_heap_count;
+#endif
+
+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ int mtrr;
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA) || defined(SUPPORT_RGX)
+ struct dentry *debugfs_apollo_dir;
+#endif
+#if defined(SUPPORT_APOLLO_FPGA)
+ struct apollo_io_region fpga;
+ struct dentry *debugfs_apollo_regs;
+ struct dentry *debugfs_apollo_pll_regs;
+ struct dentry *debugfs_fpga_regs;
+ struct dentry *debugfs_apollo_mem;
+#endif
+#if defined(SUPPORT_RGX)
+ struct dentry *debugfs_rogue_name;
+#endif
+ bool odin;
+};
+
+#if defined(SUPPORT_APOLLO_FPGA)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+
+static struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data, const struct file_operations *fops,
+ loff_t file_size)
+{
+ struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
+
+ if (de)
+ de->d_inode->i_size = file_size;
+ return de;
+}
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)) */
+
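+/* Clamp the requested transfer to the region size and insist on 32-bit
+ * aligned offsets and lengths, since the region is accessed one dword at
+ * a time.
+ */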
+static ssize_t apollo_debugfs_sanity_check(size_t *count, loff_t *ppos,
+ resource_size_t region_size)
+{
+ if (*ppos < 0)
+ return -EFAULT;
+
+ if (*ppos + *count > region_size)
+ *count = region_size - *ppos;
+
+ if ((*ppos) % sizeof(u32))
+ return -EINVAL;
+
+ if ((*count) % sizeof(u32))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t apollo_debugfs_read_io(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apollo_io_region *io = file->private_data;
+ ssize_t err;
+ loff_t i;
+
+ err = apollo_debugfs_sanity_check(&count, ppos, io->region.size);
+ if (err)
+ return err;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ for (i = 0; i < count; i += sizeof(u32), (*ppos) += sizeof(u32))
+ *(u32 *)(buf + i) = ioread32(io->registers + *ppos);
+
+ return count;
+}
+
+static ssize_t apollo_debugfs_write_io(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apollo_io_region *io = file->private_data;
+ ssize_t err;
+ loff_t i;
+
+ err = apollo_debugfs_sanity_check(&count, ppos, io->region.size);
+ if (err)
+ return err;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ for (i = 0; i < count; i += sizeof(u32), (*ppos) += sizeof(u32))
+ iowrite32(*(u32 *)(buf + i), io->registers + *ppos);
+
+ return count;
+}
+
+static ssize_t apollo_debugfs_read_mem(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apollo_region *region = file->private_data;
+ void *memory;
+ ssize_t err;
+
+ err = apollo_debugfs_sanity_check(&count, ppos, region->size);
+ if (err)
+ return err;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ memory = ioremap_wc(region->base + *ppos, count);
+ if (!memory)
+ return -EFAULT;
+
+ memcpy(buf, memory, count);
+
+ iounmap(memory);
+ (*ppos) += count;
+ return count;
+}
+
+static ssize_t apollo_debugfs_write_mem(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apollo_region *region = file->private_data;
+ void *memory;
+ ssize_t err;
+
+ err = apollo_debugfs_sanity_check(&count, ppos, region->size);
+ if (err)
+ return err;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ memory = ioremap_wc(region->base + *ppos, count);
+ if (!memory)
+ return -EFAULT;
+
+ memcpy(memory, buf, count);
+
+ /* Flush the write combiner? */
+ ioread32(memory + count - sizeof(u32));
+
+ iounmap(memory);
+ (*ppos) += count;
+ return count;
+}
+
+static const struct file_operations apollo_io_debugfs_fops = {
+ .open = simple_open,
+ .read = apollo_debugfs_read_io,
+ .write = apollo_debugfs_write_io,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations apollo_mem_debugfs_fops = {
+ .open = simple_open,
+ .read = apollo_debugfs_read_mem,
+ .write = apollo_debugfs_write_mem,
+ .llseek = default_llseek,
+};
+
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
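+/* Reserve a sub-range of a PCI BAR, using an I/O port or memory region
+ * reservation depending on the BAR's resource flags.
+ */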
+static int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+ resource_size_t offset, resource_size_t length)
+{
+ resource_size_t start, end;
+
+ start = pci_resource_start(pdev, index);
+ end = pci_resource_end(pdev, index);
+
+ if ((start + offset + length - 1) > end)
+ return -EIO;
+ if (pci_resource_flags(pdev, index) & IORESOURCE_IO) {
+ if (request_region(start + offset, length, DRV_NAME) == NULL)
+ return -EIO;
+ } else {
+ if (request_mem_region(start + offset, length, DRV_NAME)
+ == NULL)
+ return -EIO;
+ }
+ return 0;
+}
+
+static void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+ resource_size_t start, resource_size_t length)
+{
+ if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
+ release_region(start, length);
+ else
+ release_mem_region(start, length);
+}
+
+static void pll_write_reg(struct apollo_device *apollo,
+ resource_size_t reg_offset, u32 reg_value)
+{
+ BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0);
+ BUG_ON(reg_offset > apollo->tcf_pll.region.size +
+ TCF_PLL_PLL_CORE_CLK0 - 4);
+
+ /* Tweak the offset because we haven't mapped the full pll region */
+ iowrite32(reg_value, apollo->tcf_pll.registers +
+ reg_offset - TCF_PLL_PLL_CORE_CLK0);
+}
+
+static void apollo_set_clocks(struct apollo_device *apollo)
+{
+ u32 val;
+
+ /* This is disabled for TCF2 since the current FPGA builds do not
+ * like their core clocks being set (it takes apollo down).
+ */
+ if (apollo->version != APOLLO_VERSION_TCF_2) {
+ val = apollo_core_clock / 1000000;
+ pll_write_reg(apollo, TCF_PLL_PLL_CORE_CLK0, val);
+
+ val = 0x1 << PLL_CORE_DRP_GO_SHIFT;
+ pll_write_reg(apollo, TCF_PLL_PLL_CORE_DRP_GO, val);
+ }
+
+ val = apollo_mem_clock / 1000000;
+ pll_write_reg(apollo, TCF_PLL_PLL_MEMIF_CLK0, val);
+
+ val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+ pll_write_reg(apollo, TCF_PLL_PLL_MEM_DRP_GO, val);
+
+ if (apollo->version == APOLLO_VERSION_TCF_5) {
+ val = apollo_sys_clock / 1000000;
+ pll_write_reg(apollo, TCF_PLL_PLL_SYSIF_CLK0, val);
+
+ val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+ pll_write_reg(apollo, TCF_PLL_PLL_SYS_DRP_GO, val);
+ }
+
+ dev_dbg(&apollo->pdev->dev, "Setting clocks to %uMHz/%uMHz\n",
+ apollo_core_clock / 1000000,
+ apollo_mem_clock / 1000000);
+ udelay(400);
+}
+
+#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+
+/*
+ * A return value of:
+ * 0 or more means success
+ * -1 means we were unable to add an mtrr but we should continue
+ * -2 means we were unable to add an mtrr but we shouldn't continue
+ */
+static int mtrr_setup(struct pci_dev *pdev,
+ resource_size_t mem_start,
+ resource_size_t mem_size)
+{
+ int err;
+ int mtrr;
+
+ /* Reset MTRR */
+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0);
+ if (mtrr < 0) {
+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+ __LINE__, __func__, mtrr);
+ mtrr = -2;
+ goto err_out;
+ }
+
+ err = mtrr_del(mtrr, mem_start, mem_size);
+ if (err < 0) {
+ dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+ __LINE__, __func__, err);
+ mtrr = -2;
+ goto err_out;
+ }
+
+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0);
+ if (mtrr < 0) {
+ /* Stop, but not an error as this may already be set up */
+ dev_dbg(&pdev->dev,
+ "%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n",
+ __LINE__, __func__, mtrr);
+ mtrr = -1;
+ goto err_out;
+ }
+
+ err = mtrr_del(mtrr, mem_start, mem_size);
+ if (err < 0) {
+ dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+ __LINE__, __func__, err);
+ mtrr = -2;
+ goto err_out;
+ }
+
+ if (mtrr == 0) {
+ /* Replace 0 with a non-overlapping WRBACK mtrr */
+ err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+ __LINE__, __func__, err);
+ mtrr = -2;
+ goto err_out;
+ }
+ }
+
+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0);
+ if (mtrr < 0) {
+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+ __LINE__, __func__, mtrr);
+ mtrr = -1;
+ }
+
+err_out:
+ return mtrr;
+}
+
+#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE<KERNEL_VERSION(4,1,0)) */
+
+static void apollo_set_mem_mode(struct apollo_device *apollo)
+{
+ u32 val;
+
+ val = ioread32(apollo->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+ val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK
+ | HOST_PHY_MODE_MASK);
+ val |= (0x1 << ADDRESS_FORCE_SHIFT);
+ iowrite32(val, apollo->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+}
+
+static void apollo_devres_release(struct device *dev, void *res)
+{
+ /* No extra cleanup needed */
+}
+
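+/* Write a DUT register through the TCF SPI master: set the target address,
+ * load the write data, then pulse the GO register and allow the transfer
+ * to complete with a short delay.
+ */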
+static void spi_write(struct apollo_device *apollo, u32 off, u32 val)
+{
+ if (apollo->odin) {
+ iowrite32(off, apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_ADDR_RDNWR);
+ iowrite32(val, apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_WDATA);
+ iowrite32(TCF_SPI_MST_GO_MASK, apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_GO);
+ } else {
+ iowrite32(off, apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+ iowrite32(val, apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_WDATA);
+ iowrite32(TCF_SPI_MST_GO_MASK, apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+ }
+ udelay(1000);
+}
+
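+/* Read a DUT register through the TCF SPI master. Polls the master status
+ * until the transfer completes; returns 0 on success or -1 on timeout.
+ */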
+static int spi_read(struct apollo_device *apollo, u32 off, u32 *val)
+{
+ int cnt = 0;
+ u32 spi_mst_status;
+
+ if (apollo->odin) {
+ iowrite32(0x40000 | off, apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_ADDR_RDNWR);
+ iowrite32(TCF_SPI_MST_GO_MASK, apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_GO);
+ } else {
+ iowrite32(0x40000 | off, apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+ iowrite32(TCF_SPI_MST_GO_MASK, apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+ }
+
+ udelay(100);
+
+ do {
+ if (apollo->odin) {
+ spi_mst_status = ioread32(apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_STATUS);
+ } else {
+ spi_mst_status = ioread32(apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_STATUS);
+ }
+
+ if (cnt++ > 10000) {
+ dev_err(&apollo->pdev->dev,
+ "spi_read: Time out reading SPI reg (0x%x)\n",
+ off);
+ return -1;
+ }
+
+ } while (spi_mst_status != 0x08);
+
+ if (apollo->odin) {
+ *val = ioread32(apollo->tcf.registers
+ + ODN_REG_BANK_TCF_SPI_MASTER
+ + ODN_SPI_MST_RDATA);
+ } else {
+ *val = ioread32(apollo->tcf.registers
+ + TCF_CLK_CTRL_TCF_SPI_MST_RDATA);
+ }
+
+ return 0;
+}
+
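+/* A bank is considered aligned when training and the training ack both
+ * succeeded, the ack pattern failed no more than 4 times, and the data eye
+ * is at least 7 taps wide. Returns 1 if aligned, 0 otherwise.
+ */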
+static int is_interface_aligned_es2(u32 eyes, u32 clk_taps, u32 train_ack)
+{
+ u32 max_eye_start = eyes >> 16;
+ u32 min_eye_end = eyes & 0xffff;
+
+ /* If either the training or training ack failed, we haven't aligned */
+ if (!(clk_taps & 0x10000) || !(train_ack & 0x100))
+ return 0;
+
+ /* If the max eye >= min eye it means the readings are nonsense */
+ if (max_eye_start >= min_eye_end)
+ return 0;
+
+ /* If we failed the ack pattern more than 4 times */
+ if (((train_ack & 0xf0) >> 4) > 4)
+ return 0;
+
+ /* If there are fewer than 7 taps (240ps @ 40ps/tap). This number should
+ * be lower for the FPGA, since its taps are bigger. We should really
+ * calculate the "7" based on the interface clock speed.
+ */
+ if ((min_eye_end - max_eye_start) < 7)
+ return 0;
+
+ return 1;
+}
+
+static u32 sai_read_es2(struct apollo_device *apollo, u32 addr)
+{
+ iowrite32(0x200 | addr, apollo->tcf.registers + 0x300);
+ iowrite32(0x1 | addr, apollo->tcf.registers + 0x318);
+ return ioread32(apollo->tcf.registers + 0x310);
+}
+
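+/* Poll a register until (value & mask) is non-zero, for up to 50 attempts
+ * at 20ms intervals. Returns 0 on success or -ETIME on timeout.
+ */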
+static int iopol32_nonzero(u32 mask, void __iomem *addr)
+{
+ int polnum;
+ u32 read_value;
+
+ for (polnum = 0; polnum < 50; polnum++) {
+ read_value = ioread32(addr) & mask;
+ if (read_value != 0)
+ break;
+ msleep(20);
+ }
+ if (polnum == 50) {
+ pr_err(DRV_NAME " iopol32_nonzero timeout\n");
+ return -ETIME;
+ }
+ return 0;
+}
+
+static int apollo_align_interface_es2(struct apollo_device *apollo)
+{
+ int reg = 0;
+ u32 reg_reset_n;
+ int reset_cnt = 0;
+ int err = -EFAULT;
+ bool aligned = false;
+
+ /* Try to enable the core clock PLL */
+ spi_write(apollo, 0x1, 0x0);
+ reg = ioread32(apollo->tcf.registers + 0x320);
+ reg |= 0x1;
+ iowrite32(reg, apollo->tcf.registers + 0x320);
+ reg &= 0xfffffffe;
+ iowrite32(reg, apollo->tcf.registers + 0x320);
+ msleep(1000);
+
+ if (spi_read(apollo, 0x2, &reg)) {
+ dev_err(&apollo->pdev->dev,
+ "Unable to read PLL status\n");
+ goto err_out;
+ }
+
+ if (reg == 0x1) {
+ /* Select DUT PLL as core clock */
+ reg = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ reg &= 0xfffffff7;
+ iowrite32(reg, apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ } else {
+ dev_err(&apollo->pdev->dev,
+ "PLL has failed to lock, status = %x\n", reg);
+ goto err_out;
+ }
+
+ reg_reset_n = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+ while (!aligned && reset_cnt < 10 &&
+ apollo->version != APOLLO_VERSION_TCF_5) {
+ int bank;
+ u32 eyes;
+ u32 clk_taps;
+ u32 train_ack;
+
+ ++reset_cnt;
+
+ /* Reset the DUT to allow the SAI to retrain */
+ reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ udelay(100);
+ reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ udelay(100);
+
+ /* Assume alignment passed; if any bank fails on either the DUT or
+ * the FPGA we set this to false and try again, up to a maximum of
+ * 10 times.
+ */
+ aligned = true;
+
+ /* For each of the banks */
+ for (bank = 0; bank < 10; bank++) {
+ int bank_aligned = 0;
+ /* Check alignment on the DUT */
+ u32 bank_base = 0x7000 + (0x1000 * bank);
+
+ spi_read(apollo, bank_base + 0x4, &eyes);
+ spi_read(apollo, bank_base + 0x3, &clk_taps);
+ spi_read(apollo, bank_base + 0x6, &train_ack);
+
+ bank_aligned = is_interface_aligned_es2(
+ eyes, clk_taps, train_ack);
+ if (!bank_aligned) {
+ dev_warn(&apollo->pdev->dev,
+ "Alignment check failed, retrying\n");
+ aligned = false;
+ break;
+ }
+
+ /* Check alignment on the FPGA */
+ bank_base = 0xb0 + (0x10 * bank);
+
+ eyes = sai_read_es2(apollo, bank_base + 0x4);
+ clk_taps = sai_read_es2(apollo, bank_base + 0x3);
+ train_ack = sai_read_es2(apollo, bank_base + 0x6);
+
+ bank_aligned = is_interface_aligned_es2(
+ eyes, clk_taps, train_ack);
+
+ if (!bank_aligned) {
+ dev_warn(&apollo->pdev->dev,
+ "Alignment check failed, retrying\n");
+ aligned = false;
+ break;
+ }
+ }
+ }
+
+ if (!aligned) {
+ dev_err(&apollo->pdev->dev, "Unable to initialise the testchip (interface alignment failure), please restart the system.\n");
+ goto err_out;
+ }
+
+ if (reset_cnt > 1) {
+ dev_dbg(&apollo->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n");
+ dev_dbg(&apollo->pdev->dev, " This should be harmless, but if you do suspect foul play, please reset the machine.\n");
+ dev_dbg(&apollo->pdev->dev, " If you continue to see this message you may want to report it to IMGWORKS.\n");
+ }
+
+ err = 0;
+err_out:
+ return err;
+}
+
+#define SAI_STATUS_UNALIGNED 0
+#define SAI_STATUS_ALIGNED 1
+#define SAI_STATUS_ERROR 2
+
+/* returns 1 for aligned, 0 for unaligned */
+static int get_odin_sai_status(struct apollo_device *apollo, int bank)
+{
+ void __iomem *bank_addr = apollo->tcf.registers
+ + ODN_REG_BANK_SAI_RX_DDR(bank);
+ void __iomem *reg_addr;
+ u32 eyes;
+ u32 clk_taps;
+ u32 train_ack;
+ int bank_aligned;
+
+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES;
+ eyes = ioread32(reg_addr);
+
+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS;
+ clk_taps = ioread32(reg_addr);
+
+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK;
+ train_ack = ioread32(reg_addr);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+ dev_info(&apollo->pdev->dev,
+ "odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+ bank, eyes, clk_taps, train_ack);
+#endif
+ bank_aligned = is_interface_aligned_es2(eyes, clk_taps, train_ack);
+
+ if (bank_aligned)
+ return SAI_STATUS_ALIGNED;
+
+ dev_warn(&apollo->pdev->dev, "odin bank %d is unaligned\n", bank);
+ return SAI_STATUS_UNALIGNED;
+}
+
+
+/* Read the odin multi clocked bank align status.
+ * Returns 1 for aligned, 0 for unaligned
+ */
+static int read_odin_mca_status(struct apollo_device *apollo)
+{
+ void __iomem *bank_addr = apollo->tcf.registers
+ + ODN_REG_BANK_MULTI_CLK_ALIGN;
+ void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS;
+ u32 mca_status;
+
+ mca_status = ioread32(reg_addr);
+
+#if 0 /* Enable this if there are alignment issues */
+ dev_info(&apollo->pdev->dev,
+ "Odin MCA_STATUS = %08x\n", mca_status);
+#endif
+ return mca_status & ODN_ALIGNMENT_FOUND_MASK;
+}
+
+
+/* Read the DUT multi clocked bank align status.
+ * Returns 1 for aligned, 0 for unaligned
+ */
+static int read_dut_mca_status(struct apollo_device *apollo)
+{
+ u32 mca_status;
+ const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */
+ int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN)
+ + mca_status_register_offset;
+
+ spi_read(apollo, spi_address, &mca_status);
+
+#if 0 /* Enable this if there are alignment issues */
+ dev_info(&apollo->pdev->dev,
+ "DUT MCA_STATUS = %08x\n", mca_status);
+#endif
+ return mca_status & 1; /* 'alignment found' status is the lowest bit */
+}
+
+/* returns 1 for aligned, 0 for unaligned */
+static int get_dut_sai_status(struct apollo_device *apollo, int bank)
+{
+ u32 eyes;
+ u32 clk_taps;
+ u32 train_ack;
+ int bank_aligned;
+ const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1
+ + (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank));
+ int spi_timeout;
+
+ spi_timeout = spi_read(apollo, bank_base
+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes);
+ if (spi_timeout)
+ return SAI_STATUS_ERROR;
+
+ spi_read(apollo, bank_base
+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps);
+ spi_read(apollo, bank_base
+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+ dev_info(&apollo->pdev->dev,
+ "dut bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+ bank, eyes, clk_taps, train_ack);
+#endif
+ bank_aligned = is_interface_aligned_es2(eyes, clk_taps, train_ack);
+
+ if (bank_aligned)
+ return SAI_STATUS_ALIGNED;
+
+ dev_warn(&apollo->pdev->dev, "dut bank %d is unaligned\n", bank);
+ return SAI_STATUS_UNALIGNED;
+}
+
+
+/* Do a hard reset on the DUT */
+static int odin_hard_reset(struct apollo_device *apollo)
+{
+ int reset_cnt = 0;
+ bool aligned = false;
+ int alignment_found;
+
+ msleep(100);
+
+ /* It is essential to do an SPI reset once on power-up before
+ * doing any DUT reads via the SPI interface.
+ */
+ iowrite32(1, apollo->tcf.registers /* set bit 1 low */
+ + ODN_CORE_EXTERNAL_RESETN);
+ msleep(20);
+
+ iowrite32(3, apollo->tcf.registers /* set bit 1 high */
+ + ODN_CORE_EXTERNAL_RESETN);
+ msleep(20);
+
+ while (!aligned && (reset_cnt < 20)) {
+
+ int bank;
+
+ /* Reset the DUT to allow the SAI to retrain */
+ iowrite32(2, /* set bit 0 low */
+ apollo->tcf.registers
+ + ODN_CORE_EXTERNAL_RESETN);
+
+ /* Hold the DUT in reset for 50mS */
+ msleep(50);
+
+ /* Take the DUT out of reset */
+ iowrite32(3, /* set bit 0 hi */
+ apollo->tcf.registers
+ + ODN_CORE_EXTERNAL_RESETN);
+ reset_cnt++;
+
+ /* Wait 200mS for the DUT to stabilise */
+ msleep(200);
+
+ /* Check the odin Multi Clocked bank Align status */
+ alignment_found = read_odin_mca_status(apollo);
+ dev_info(&apollo->pdev->dev,
+ "Odin mca_status indicates %s\n",
+ alignment_found ? "aligned" : "UNALIGNED");
+
+ /* Check the DUT MCA status */
+ alignment_found = read_dut_mca_status(apollo);
+ dev_info(&apollo->pdev->dev,
+ "DUT mca_status indicates %s\n",
+ alignment_found ? "aligned" : "UNALIGNED");
+
+ /* If all banks have aligned then the reset was successful */
+ for (bank = 0; bank < 10; bank++) {
+
+ int dut_aligned = 0;
+ int odin_aligned = 0;
+
+ odin_aligned = get_odin_sai_status(apollo, bank);
+ dut_aligned = get_dut_sai_status(apollo, bank);
+
+ if (dut_aligned == SAI_STATUS_ERROR)
+ return SAI_STATUS_ERROR;
+
+ if (!dut_aligned || !odin_aligned) {
+ aligned = false;
+ break;
+ }
+ aligned = true;
+ }
+
+ if (aligned) {
+ dev_info(&apollo->pdev->dev,
+ "all banks have aligned\n");
+ break;
+ }
+
+ dev_warn(&apollo->pdev->dev,
+ "Warning- not all banks have aligned. Trying again.\n");
+ }
+
+ if (!aligned)
+ dev_warn(&apollo->pdev->dev, "odin_hard_reset failed\n");
+
+ return (aligned) ? 0 : 1; /* return 0 for success */
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *t)
+#else
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+ int *t)
+#endif
+{
+ struct apollo_device *apollo;
+ int err = -ENODEV;
+ u32 tmp;
+
+ if (!thermal)
+ goto err_out;
+
+ apollo = (struct apollo_device *)thermal->devdata;
+
+ if (!apollo)
+ goto err_out;
+
+ if (spi_read(apollo, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) {
+ dev_err(&apollo->pdev->dev,
+ "Failed to read apollo temperature sensor\n");
+
+ goto err_out;
+ }
+
+ /* Report this in millidegree Celsius */
+ *t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000;
+
+ err = 0;
+
+err_out:
+ return err;
+}
+
+static struct thermal_zone_device_ops apollo_thermal_dev_ops = {
+ .get_temp = apollo_thermal_get_temp,
+};
+
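+/* Power-cycle the DUT rails (TCF2 only), set the PLL clocks, then bring the
+ * clock generator, SCB, PDPs, DDR and DUT out of reset in sequence. The
+ * DUT/FPGA interface is then re-aligned (except on TCF5) and, on TCF2, the
+ * temperature sensor is enabled.
+ */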
+static int apollo_hard_reset(struct apollo_device *apollo)
+{
+ u32 reg;
+ u32 reg_reset_n = 0;
+
+ /* For displaying some build info */
+ u32 build_inc;
+ u32 build_owner;
+
+ int err = 0;
+
+ /* This is required for SPI reset which is not yet implemented. */
+ /*u32 aux_reset_n;*/
+
+ if (apollo->version == APOLLO_VERSION_TCF_2) {
+ /* Power down */
+ reg = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ reg &= ~DUT_CTRL_VCC_0V9EN;
+ reg &= ~DUT_CTRL_VCC_1V8EN;
+ reg |= DUT_CTRL_VCC_IO_INH;
+ reg |= DUT_CTRL_VCC_CORE_INH;
+ iowrite32(reg, apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ msleep(500);
+ }
+
+ /* Put everything into reset */
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+ /* Set clock speed here, before reset. */
+ apollo_set_clocks(apollo);
+
+ /* Take GLB_CLKG and SCB out of reset */
+ reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT);
+ reg_reset_n |= (0x1 << SCB_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ msleep(100);
+
+ if (apollo->version == APOLLO_VERSION_TCF_2) {
+ /* Enable the voltage control regulators on DUT */
+ reg = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ reg |= DUT_CTRL_VCC_0V9EN;
+ reg |= DUT_CTRL_VCC_1V8EN;
+ reg &= ~DUT_CTRL_VCC_IO_INH;
+ reg &= ~DUT_CTRL_VCC_CORE_INH;
+ iowrite32(reg, apollo->tcf.registers +
+ TCF_CLK_CTRL_DUT_CONTROL_1);
+ msleep(300);
+ }
+ /* Take PDP1 and PDP2 out of reset */
+ reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT);
+ reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT);
+
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ msleep(100);
+
+ /* Take DDR out of reset */
+ reg_reset_n |= (0x1 << DDR_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+ /* Take DUT_DCM out of reset */
+ reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ msleep(100);
+
+
+ err = iopol32_nonzero(DCM_LOCK_STATUS_MASK,
+ apollo->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS);
+
+ if (err != 0)
+ goto err_out;
+
+ if (apollo->version == APOLLO_VERSION_TCF_2) {
+ /* Set ODT to a specific value that seems to provide the most
+ * stable signals.
+ */
+ spi_write(apollo, 0x11, 0x413130);
+ }
+
+ /* Take DUT out of reset */
+ reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+ iowrite32(reg_reset_n, apollo->tcf.registers +
+ TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+ msleep(100);
+
+ if (apollo->version != APOLLO_VERSION_TCF_5) {
+ err = apollo_align_interface_es2(apollo);
+ if (err)
+ goto err_out;
+ }
+
+ if (apollo->version == APOLLO_VERSION_TCF_2) {
+ /* Enable the temperature sensor */
+ spi_write(apollo, 0xc, 0); /* power up */
+ spi_write(apollo, 0xc, 2); /* reset */
+ spi_write(apollo, 0xc, 6); /* init & run */
+
+ /* Register a new thermal zone */
+ apollo->thermal_zone = thermal_zone_device_register("apollo", 0, 0, apollo,
+ &apollo_thermal_dev_ops,
+ NULL, 0, 0);
+ if (IS_ERR(apollo->thermal_zone)) {
+ dev_warn(&apollo->pdev->dev, "Couldn't register thermal zone");
+ apollo->thermal_zone = NULL;
+ }
+ }
+
+ /* Check the build */
+ reg = ioread32(apollo->tcf.registers + 0x10);
+ build_inc = (reg >> 12) & 0xff;
+ build_owner = (reg >> 20) & 0xf;
+
+ if (build_inc) {
+ dev_alert(&apollo->pdev->dev,
+ "BE WARNED: You are not running a tagged release of the FPGA!\n");
+
+ dev_alert(&apollo->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n",
+ build_owner, build_inc);
+ }
+
+ dev_dbg(&apollo->pdev->dev, "FPGA Release: %u.%02u\n", reg >> 8 & 0xf,
+ reg & 0xff);
+
+err_out:
+ return err;
+}
+
+static int apollo_hw_init(struct apollo_device *apollo)
+{
+ u32 reg;
+
+ apollo_hard_reset(apollo);
+ apollo_set_mem_mode(apollo);
+
+ if (apollo->version == APOLLO_VERSION_TCF_BONNIE) {
+ /* Enable ASTC via SPI */
+ if (spi_read(apollo, 0xf, &reg)) {
+ dev_err(&apollo->pdev->dev,
+ "Failed to read apollo ASTC register\n");
+ goto err_out;
+ }
+
+ reg |= 0x1 << 4;
+ spi_write(apollo, 0xf, reg);
+ }
+
+err_out:
+ return 0;
+}
+
+
+static int odin_hw_init(struct apollo_device *apollo)
+{
+ int err;
+
+ err = odin_hard_reset(apollo);
+
+ dev_info(&apollo->pdev->dev, "odin_hw_init %s\n",
+ (err) ? "failed" : "succeeded");
+ return err;
+}
+
+
+/* Reads PLL status and temp sensor if there is one */
+int apollo_sys_info(struct device *dev, u32 *tmp, u32 *pll)
+{
+ int err = -ENODEV;
+ struct apollo_device *apollo = devres_find(dev, apollo_devres_release,
+ NULL, NULL);
+
+ if (!apollo) {
+ dev_err(dev, "No apollo device resources found\n");
+ goto err_out;
+ }
+
+ if (apollo->version == APOLLO_VERSION_TCF_5) {
+ /* Not implemented on TCF5 */
+ err = 0;
+ goto err_out;
+ }
+
+ if (apollo->version == APOLLO_VERSION_TCF_2) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+ unsigned long t;
+#else
+ int t;
+#endif
+
+ err = apollo_thermal_get_temp(apollo->thermal_zone, &t);
+ if (err)
+ goto err_out;
+ *tmp = t / 1000;
+ }
+
+ if (apollo->odin) {
+ *pll = 0;
+ } else {
+ if (spi_read(apollo, 0x2, pll)) {
+ dev_err(dev, "Failed to read PLL status\n");
+ goto err_out;
+ }
+ }
+ err = 0;
+
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(apollo_sys_info);
+
+int apollo_core_clock_speed(struct device *dev)
+{
+ return apollo_core_clock;
+}
+EXPORT_SYMBOL(apollo_core_clock_speed);
+
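+/* Convert a BCD-encoded byte to its decimal value, e.g. 0x42 -> 42 */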
+#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v) & 0x0F))
+
+/* Read revision ID registers */
+int apollo_sys_strings(struct device *dev,
+ char *str_fpga_rev, size_t size_fpga_rev,
+ char *str_tcf_core_rev, size_t size_tcf_core_rev,
+ char *str_tcf_core_target_build_id,
+ size_t size_tcf_core_target_build_id,
+ char *str_pci_ver, size_t size_pci_ver,
+ char *str_macro_ver, size_t size_macro_ver)
+{
+ int err = 0;
+ u32 val;
+ resource_size_t host_fpga_base;
+ void __iomem *host_fpga_registers;
+
+ struct apollo_device *apollo = devres_find(dev, apollo_devres_release,
+ NULL, NULL);
+
+ if (!str_fpga_rev ||
+ !size_fpga_rev ||
+ !str_tcf_core_rev ||
+ !size_tcf_core_rev ||
+ !str_tcf_core_target_build_id ||
+ !size_tcf_core_target_build_id ||
+ !str_pci_ver ||
+ !size_pci_ver ||
+ !str_macro_ver ||
+ !size_macro_ver) {
+
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!apollo) {
+ dev_err(dev, "No apollo device resources found\n");
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ if (apollo->odin) {
+ char temp_str[12];
+
+ /* Read the Odin major and minor revision ID register Rx-xx */
+ val = ioread32(apollo->tcf.registers + ODN_CORE_REVISION);
+
+ snprintf(str_tcf_core_rev,
+ size_tcf_core_rev,
+ "%d.%d",
+ HEX2DEC((val & ODN_REVISION_MAJOR_MASK)
+ >> ODN_REVISION_MAJOR_SHIFT),
+ HEX2DEC((val & ODN_REVISION_MINOR_MASK)
+ >> ODN_REVISION_MINOR_SHIFT));
+
+ dev_info(&apollo->pdev->dev, "Odin core revision %s\n",
+ str_tcf_core_rev);
+
+ /* Read the Odin register containing the Perforce changelist
+ * value that the FPGA build was generated from
+ */
+ val = ioread32(apollo->tcf.registers + ODN_CORE_CHANGE_SET);
+
+ snprintf(str_tcf_core_target_build_id,
+ size_tcf_core_target_build_id,
+ "%d",
+ (val & ODN_CHANGE_SET_SET_MASK)
+ >> ODN_CHANGE_SET_SET_SHIFT);
+
+ /* Read the Odin User_ID register containing the User ID for
+ * identification of a modified build
+ */
+ val = ioread32(apollo->tcf.registers + ODN_CORE_USER_ID);
+
+ snprintf(temp_str,
+ sizeof(temp_str),
+ "%d",
+ HEX2DEC((val & ODN_USER_ID_ID_MASK)
+ >> ODN_USER_ID_ID_SHIFT));
+
+ /* Read the Odin User_Build register containing the User build
+ * number for identification of modified builds
+ */
+ val = ioread32(apollo->tcf.registers + ODN_CORE_USER_BUILD);
+
+ snprintf(temp_str,
+ sizeof(temp_str),
+ "%d",
+ HEX2DEC((val & ODN_USER_BUILD_BUILD_MASK)
+ >> ODN_USER_BUILD_BUILD_SHIFT));
+
+ return 0;
+ }
+
+
+ /* To get some of the version information we need to read from a
+ * register that we don't normally have mapped. Map it temporarily
+ * (without trying to reserve it) to get the information we need.
+ */
+ host_fpga_base =
+ pci_resource_start(apollo->pdev, SYS_APOLLO_REG_PCI_BASENUM)
+ + 0x40F0;
+
+ host_fpga_registers = ioremap_nocache(host_fpga_base, 0x04);
+ if (!host_fpga_registers) {
+ dev_err(&apollo->pdev->dev,
+ "Failed to map host fpga registers\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* Create the components of the PCI and macro versions */
+ val = ioread32(host_fpga_registers);
+ snprintf(str_pci_ver, size_pci_ver, "%d",
+ HEX2DEC((val & 0x00FF0000) >> 16));
+ snprintf(str_macro_ver, size_macro_ver, "%d.%d",
+ (val & 0x00000F00) >> 8,
+ HEX2DEC((val & 0x000000FF) >> 0));
+
+ /* Unmap the register now that we no longer need it */
+ iounmap(host_fpga_registers);
+
+ /* Create the components of the FPGA revision number */
+ val = ioread32(apollo->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG);
+ snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d",
+ HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK)
+ >> FPGA_REV_REG_MAJOR_SHIFT),
+ HEX2DEC((val & FPGA_REV_REG_MINOR_MASK)
+ >> FPGA_REV_REG_MINOR_SHIFT),
+ HEX2DEC((val & FPGA_REV_REG_MAINT_MASK)
+ >> FPGA_REV_REG_MAINT_SHIFT));
+
+ /* Create the components of the TCF core revision number */
+ val = ioread32(apollo->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG);
+ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d",
+ HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK)
+ >> TCF_CORE_REV_REG_MAJOR_SHIFT),
+ HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK)
+ >> TCF_CORE_REV_REG_MINOR_SHIFT),
+ HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK)
+ >> TCF_CORE_REV_REG_MAINT_SHIFT));
+
+ /* Create the component of the TCF core target build ID */
+ val = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+ snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id,
+ "%d",
+ (val & TCF_CORE_TARGET_BUILD_ID_MASK)
+ >> TCF_CORE_TARGET_BUILD_ID_SHIFT);
+
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(apollo_sys_strings);
+
+static irqreturn_t apollo_irq_handler(int irq, void *data)
+{
+ u32 interrupt_status;
+ u32 interrupt_clear = 0;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct apollo_device *apollo = (struct apollo_device *)data;
+
+ spin_lock_irqsave(&apollo->interrupt_handler_lock, flags);
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ /* If we're faking interrupts pretend we got both ext and PDP ints */
+ interrupt_status = APOLLO_INTERRUPT_FLAG_EXT
+ | APOLLO_INTERRUPT_FLAG_PDP;
+#else
+ interrupt_status = ioread32(apollo->tcf.registers
+ + TCF_CLK_CTRL_INTERRUPT_STATUS);
+#endif
+
+ if (interrupt_status & APOLLO_INTERRUPT_FLAG_EXT) {
+ struct apollo_interrupt_handler *ext_int =
+ &apollo->interrupt_handlers[APOLLO_INTERRUPT_EXT];
+
+ if (ext_int->enabled && ext_int->handler_function) {
+ ext_int->handler_function(ext_int->handler_data);
+ interrupt_clear |= APOLLO_INTERRUPT_FLAG_EXT;
+ }
+ ret = IRQ_HANDLED;
+ }
+ if (interrupt_status & APOLLO_INTERRUPT_FLAG_PDP) {
+ struct apollo_interrupt_handler *pdp_int =
+ &apollo->interrupt_handlers[APOLLO_INTERRUPT_PDP];
+
+ if (pdp_int->enabled && pdp_int->handler_function) {
+ pdp_int->handler_function(pdp_int->handler_data);
+ interrupt_clear |= APOLLO_INTERRUPT_FLAG_PDP;
+ }
+ ret = IRQ_HANDLED;
+ }
+
+ if (apollo->version == APOLLO_VERSION_TCF_5) {
+ /* On TC5 the interrupt is not generated by the TC framework, but
+ * by the PDP itself, so we always have to call back into the TC5
+ * PDP code regardless of the interrupt status of the TCF.
+ */
+ struct apollo_interrupt_handler *pdp_int =
+ &apollo->interrupt_handlers[APOLLO_INTERRUPT_TC5_PDP];
+
+ if (pdp_int->enabled && pdp_int->handler_function) {
+ pdp_int->handler_function(pdp_int->handler_data);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if (interrupt_clear)
+ iowrite32(0xffffffff,
+ apollo->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+ spin_unlock_irqrestore(&apollo->interrupt_handler_lock, flags);
+
+ return ret;
+}
+
+static irqreturn_t odin_irq_handler(int irq, void *data)
+{
+ u32 interrupt_status;
+ u32 interrupt_clear = 0;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct apollo_device *apollo = (struct apollo_device *)data;
+
+ spin_lock_irqsave(&apollo->interrupt_handler_lock, flags);
+
+ interrupt_status = ioread32(apollo->tcf.registers
+ + ODN_CORE_INTERRUPT_STATUS);
+
+ if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) {
+ struct apollo_interrupt_handler *ext_int =
+ &apollo->interrupt_handlers[APOLLO_INTERRUPT_EXT];
+
+ if (ext_int->enabled && ext_int->handler_function) {
+ ext_int->handler_function(ext_int->handler_data);
+ interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT;
+ }
+ ret = IRQ_HANDLED;
+ }
+ if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) {
+ struct apollo_interrupt_handler *pdp_int =
+ &apollo->interrupt_handlers[APOLLO_INTERRUPT_PDP];
+
+ if (pdp_int->enabled && pdp_int->handler_function) {
+ pdp_int->handler_function(pdp_int->handler_data);
+ interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1;
+ }
+ ret = IRQ_HANDLED;
+ }
+
+ if (interrupt_clear)
+ iowrite32(interrupt_clear,
+ apollo->tcf.registers + ODN_CORE_INTERRUPT_CLR);
+
+ spin_unlock_irqrestore(&apollo->interrupt_handler_lock, flags);
+
+ return ret;
+}
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+static void apollo_irq_fake_wrapper(unsigned long data)
+{
+ struct apollo_device *apollo = (struct apollo_device *)data;
+
+ apollo_irq_handler(0, apollo);
+
+ mod_timer(&apollo->timer,
+ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+}
+#endif
+
+static int apollo_enable_irq(struct apollo_device *apollo)
+{
+ int err = 0;
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ setup_timer(&apollo->timer, apollo_irq_fake_wrapper,
+ (unsigned long)apollo);
+ mod_timer(&apollo->timer,
+ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+ {
+ u32 val;
+
+ iowrite32(0, apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ iowrite32(0xffffffff, apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+ /* Set sense to active high */
+ val = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK);
+ iowrite32(val, apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_OP_CFG);
+
+ err = request_irq(apollo->pdev->irq, apollo_irq_handler,
+ IRQF_SHARED, DRV_NAME, apollo);
+ }
+#endif
+ return err;
+}
+
+static void apollo_disable_irq(struct apollo_device *apollo)
+{
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ del_timer_sync(&apollo->timer);
+#else
+ iowrite32(0, apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ iowrite32(0xffffffff, apollo->tcf.registers +
+ TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+ free_irq(apollo->pdev->irq, apollo);
+#endif
+}
+
+static int odin_enable_irq(struct apollo_device *apollo)
+{
+ int err = 0;
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ setup_timer(&apollo->timer, apollo_irq_fake_wrapper,
+ (unsigned long)apollo);
+ mod_timer(&apollo->timer,
+ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+ iowrite32(0, apollo->tcf.registers +
+ ODN_CORE_INTERRUPT_ENABLE);
+ iowrite32(0xffffffff, apollo->tcf.registers +
+ ODN_CORE_INTERRUPT_CLR);
+
+ dev_info(&apollo->pdev->dev,
+ "Registering IRQ %d for use by Odin\n",
+ apollo->pdev->irq);
+
+ err = request_irq(apollo->pdev->irq, odin_irq_handler,
+ IRQF_SHARED, DRV_NAME, apollo);
+
+ if (err) {
+ dev_err(&apollo->pdev->dev,
+ "Error - IRQ %d failed to register\n",
+ apollo->pdev->irq);
+ } else {
+ dev_info(&apollo->pdev->dev,
+ "IRQ %d was successfully registered for use by Odin\n",
+ apollo->pdev->irq);
+ }
+#endif
+ return err;
+}
+
+static void odin_disable_irq(struct apollo_device *apollo)
+{
+ dev_info(&apollo->pdev->dev, "odin_disable_irq\n");
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ del_timer_sync(&apollo->timer);
+#else
+ iowrite32(0, apollo->tcf.registers +
+ ODN_CORE_INTERRUPT_ENABLE);
+ iowrite32(0xffffffff, apollo->tcf.registers +
+ ODN_CORE_INTERRUPT_CLR);
+
+ free_irq(apollo->pdev->irq, apollo);
+#endif
+}
+
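+/* Register the PDP platform device, selecting the register/PLL resource set
+ * that matches the detected testchip variant (ES2, TCF5 or Odin).
+ */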
+static int register_pdp_device(struct apollo_device *apollo)
+{
+ int err = 0;
+ resource_size_t reg_start = (apollo->odin) ?
+ pci_resource_start(apollo->pdev, ODN_SYS_BAR) :
+ pci_resource_start(apollo->pdev, SYS_APOLLO_REG_PCI_BASENUM);
+ struct resource pdp_resources_es2[] = {
+ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+ DEFINE_RES_MEM_NAMED(reg_start +
+ SYS_APOLLO_REG_PLL_OFFSET +
+ TCF_PLL_PLL_PDP_CLK0,
+ TCF_PLL_PLL_PDP2_DRP_GO -
+ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+ };
+ struct resource pdp_resources_tcf5[] = {
+ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+ DEFINE_RES_MEM_NAMED(reg_start +
+ SYS_APOLLO_REG_PLL_OFFSET +
+ TCF_PLL_PLL_PDP_CLK0,
+ TCF_PLL_PLL_PDP2_DRP_GO -
+ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ TC5_SYS_APOLLO_REG_PCI_BASENUM)
+ + TC5_SYS_APOLLO_REG_PDP2_OFFSET,
+ TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"),
+
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ TC5_SYS_APOLLO_REG_PCI_BASENUM)
+ + TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET,
+ TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE,
+ "tc5-pdp2-fbdc-regs"),
+
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ TC5_SYS_APOLLO_REG_PCI_BASENUM)
+ + TC5_SYS_APOLLO_REG_HDMI_OFFSET,
+ TC5_SYS_APOLLO_REG_HDMI_SIZE,
+ "tc5-adv5711-regs"),
+ };
+ struct resource pdp_resources_odin[] = {
+ DEFINE_RES_MEM_NAMED(reg_start +
+ ODN_PDP_REGS_OFFSET, /* start */
+ ODN_PDP_REGS_SIZE, /* size */
+ "pdp-regs"),
+ DEFINE_RES_MEM_NAMED(reg_start +
+ ODN_SYS_REGS_OFFSET +
+ ODN_REG_BANK_ODN_CLK_BLK +
+ ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */
+ ODN_PDP_P_CLK_IN_DIVIDER_REG -
+ ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */
+ "pll-regs"),
+ DEFINE_RES_MEM_NAMED(reg_start +
+ ODN_SYS_REGS_OFFSET +
+ ODN_REG_BANK_CORE, /* start */
+ ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */
+ "odn-core"),
+ };
+
+ struct apollo_pdp_platform_data pdata = {
+#if defined(SUPPORT_ION)
+ .ion_device = apollo->ion_device,
+ .ion_heap_id = ION_HEAP_APOLLO_PDP,
+#endif
+ .memory_base = apollo->apollo_mem.base,
+ .pdp_heap_memory_base = apollo->pdp_heap_mem_base,
+ .pdp_heap_memory_size = apollo->pdp_heap_mem_size,
+ };
+ struct platform_device_info pdp_device_info = {
+ .parent = &apollo->pdev->dev,
+ .name = (apollo->odin) ? ODN_DEVICE_NAME_PDP
+ : APOLLO_DEVICE_NAME_PDP,
+ .id = -2,
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ if (apollo->version == APOLLO_VERSION_TCF_5) {
+ pdp_device_info.res = pdp_resources_tcf5;
+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5);
+ } else if (apollo->version == APOLLO_VERSION_TCF_2 ||
+ apollo->version == APOLLO_VERSION_TCF_BONNIE) {
+ pdp_device_info.res = pdp_resources_es2;
+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2);
+ } else if (apollo->odin) {
+ pdp_device_info.res = pdp_resources_odin;
+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin);
+ } else {
+ dev_err(&apollo->pdev->dev,
+ "Unable to set PDP resource info for unknown apollo device\n");
+ }
+
+ apollo->pdp_dev = platform_device_register_full(&pdp_device_info);
+ if (IS_ERR(apollo->pdp_dev)) {
+ err = PTR_ERR(apollo->pdp_dev);
+ dev_err(&apollo->pdev->dev,
+ "Failed to register PDP device (%d)\n", err);
+ apollo->pdp_dev = NULL;
+ goto err;
+ }
+err:
+ return err;
+}
+
+#if defined(SUPPORT_RGX)
+
+static int register_ext_device(struct apollo_device *apollo)
+{
+ int err = 0;
+ struct resource rogue_resources[] = {
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ SYS_RGX_REG_PCI_BASENUM),
+ SYS_RGX_REG_REGION_SIZE, "rogue-regs"),
+ };
+ struct resource odin_rogue_resources[] = {
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ ODN_DUT_SOCIF_BAR),
+ ODN_DUT_SOCIF_SIZE, "rogue-regs"),
+ };
+ struct apollo_rogue_platform_data pdata = {
+#if defined(SUPPORT_ION)
+ .ion_device = apollo->ion_device,
+ .ion_heap_id = ION_HEAP_APOLLO_ROGUE,
+#endif
+ .apollo_memory_base = apollo->apollo_mem.base,
+ .pdp_heap_memory_base = apollo->pdp_heap_mem_base,
+ .pdp_heap_memory_size = apollo->pdp_heap_mem_size,
+ .rogue_heap_memory_base = apollo->ext_heap_mem_base,
+ .rogue_heap_memory_size = apollo->ext_heap_mem_size,
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ .secure_heap_memory_base = apollo->secure_heap_mem_base,
+ .secure_heap_memory_size = apollo->secure_heap_mem_size,
+#endif
+ };
+ struct platform_device_info rogue_device_info = {
+ .parent = &apollo->pdev->dev,
+ .name = APOLLO_DEVICE_NAME_ROGUE,
+ .id = -2,
+ .res = rogue_resources,
+ .num_res = ARRAY_SIZE(rogue_resources),
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+ struct platform_device_info odin_rogue_dev_info = {
+ .parent = &apollo->pdev->dev,
+ .name = APOLLO_DEVICE_NAME_ROGUE,
+ .id = -2,
+ .res = odin_rogue_resources,
+ .num_res = ARRAY_SIZE(odin_rogue_resources),
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ if (apollo->odin) {
+ apollo->ext_dev
+ = platform_device_register_full(&odin_rogue_dev_info);
+ } else {
+ apollo->ext_dev
+ = platform_device_register_full(&rogue_device_info);
+ }
+
+ if (IS_ERR(apollo->ext_dev)) {
+ err = PTR_ERR(apollo->ext_dev);
+ dev_err(&apollo->pdev->dev,
+ "Failed to register rogue device (%d)\n", err);
+ apollo->ext_dev = NULL;
+ }
+ return err;
+}
+
+#elif defined(SUPPORT_APOLLO_FPGA)
+
+static int register_ext_device(struct apollo_device *apollo)
+{
+ int err = 0;
+ struct resource fpga_resources[] = {
+ /* FIXME: Don't overload SYS_RGX_REG_xxx for FPGA */
+ DEFINE_RES_MEM_NAMED(pci_resource_start(apollo->pdev,
+ SYS_RGX_REG_PCI_BASENUM),
+ SYS_RGX_REG_REGION_SIZE, "fpga-regs"),
+ };
+ struct apollo_fpga_platform_data pdata = {
+ .apollo_memory_base = apollo->apollo_mem.base,
+ .pdp_heap_memory_base = apollo->pdp_heap_mem_base,
+ .pdp_heap_memory_size = apollo->pdp_heap_mem_size,
+ };
+ struct platform_device_info fpga_device_info = {
+ .parent = &apollo->pdev->dev,
+ .name = APOLLO_DEVICE_NAME_FPGA,
+ .id = -1,
+ .res = fpga_resources,
+ .num_res = ARRAY_SIZE(fpga_resources),
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ apollo->ext_dev = platform_device_register_full(&fpga_device_info);
+ if (IS_ERR(apollo->ext_dev)) {
+ err = PTR_ERR(apollo->ext_dev);
+ dev_err(&apollo->pdev->dev,
+ "Failed to register fpga device (%d)\n", err);
+ apollo->ext_dev = NULL;
+ /* Fall through */
+ }
+
+ return err;
+}
+
+#else /* defined(SUPPORT_APOLLO_FPGA) */
+
+static inline int register_ext_device(struct apollo_device *apollo)
+{
+ return 0;
+}
+
+#endif /* defined(SUPPORT_RGX) */
+
+#if defined(SUPPORT_ION)
+
+static int apollo_ion_init(struct apollo_device *apollo, int mem_bar)
+{
+ int i, err = 0;
+ struct ion_platform_heap ion_heap_data[APOLLO_ION_HEAP_COUNT] = {
+ {
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ },
+ {
+ .type = ION_HEAP_TYPE_CUSTOM,
+ .id = ION_HEAP_APOLLO_PDP,
+ .size = apollo->pdp_heap_mem_size,
+ .base = apollo->pdp_heap_mem_base,
+ .name = "apollo-pdp",
+ },
+#if defined(SUPPORT_RGX)
+ {
+ .type = ION_HEAP_TYPE_CUSTOM,
+ .id = ION_HEAP_APOLLO_ROGUE,
+ .size = apollo->ext_heap_mem_size,
+ .base = apollo->ext_heap_mem_base,
+ .name = "apollo-rogue",
+ },
+#endif /* defined(SUPPORT_RGX) */
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ {
+ .type = ION_HEAP_TYPE_CUSTOM,
+ .id = ION_HEAP_APOLLO_SECURE,
+ .size = apollo->secure_heap_mem_size,
+ .base = apollo->secure_heap_mem_base,
+ .name = "apollo-secure",
+ },
+#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
+ };
+
+ apollo->ion_device = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(apollo->ion_device)) {
+ err = PTR_ERR(apollo->ion_device);
+ goto err_out;
+ }
+
+ err = request_pci_io_addr(apollo->pdev, mem_bar, 0,
+ apollo->apollo_mem.size);
+ if (err) {
+ dev_err(&apollo->pdev->dev,
+ "Failed to request APOLLO memory (%d)\n", err);
+ goto err_free_device;
+ }
+
+ apollo->ion_heaps[0] = ion_heap_create(&ion_heap_data[0]);
+ if (IS_ERR_OR_NULL(apollo->ion_heaps[0])) {
+ err = PTR_ERR(apollo->ion_heaps[0]);
+ apollo->ion_heaps[0] = NULL;
+ goto err_free_device;
+ }
+ ion_device_add_heap(apollo->ion_device, apollo->ion_heaps[0]);
+
+ for (i = 1; i < APOLLO_ION_HEAP_COUNT; i++) {
+ bool allow_cpu_map = true;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ if (ion_heap_data[i].id == ION_HEAP_APOLLO_SECURE)
+ allow_cpu_map = false;
+#endif
+ apollo->ion_heaps[i] = ion_lma_heap_create(&ion_heap_data[i],
+ allow_cpu_map);
+ if (IS_ERR_OR_NULL(apollo->ion_heaps[i])) {
+ err = PTR_ERR(apollo->ion_heaps[i]);
+ apollo->ion_heaps[i] = NULL;
+ goto err_free_heaps;
+ }
+ ion_device_add_heap(apollo->ion_device, apollo->ion_heaps[i]);
+ }
+
+ return 0;
+
+err_free_heaps:
+ ion_heap_destroy(apollo->ion_heaps[0]);
+
+ for (i = 1; i < APOLLO_ION_HEAP_COUNT; i++) {
+ if (!apollo->ion_heaps[i])
+ break;
+ ion_lma_heap_destroy(apollo->ion_heaps[i]);
+ }
+
+ release_pci_io_addr(apollo->pdev, mem_bar,
+ apollo->apollo_mem.base, apollo->apollo_mem.size);
+err_free_device:
+ ion_device_destroy(apollo->ion_device);
+err_out:
+ /* If the ptr was NULL, err may still be 0 on this error path */
+ if (err == 0)
+ err = -ENOMEM;
+ return err;
+}
+
+static void apollo_ion_deinit(struct apollo_device *apollo, int mem_bar)
+{
+ int i = 0;
+
+ ion_device_destroy(apollo->ion_device);
+ ion_heap_destroy(apollo->ion_heaps[0]);
+ for (i = 1; i < APOLLO_ION_HEAP_COUNT; i++)
+ ion_lma_heap_destroy(apollo->ion_heaps[i]);
+ release_pci_io_addr(apollo->pdev, mem_bar,
+ apollo->apollo_mem.base, apollo->apollo_mem.size);
+}
+
+#endif /* defined(SUPPORT_ION) */
+
+static enum apollo_version_t
+apollo_detect_tc_version(struct apollo_device *apollo)
+{
+ u32 val = ioread32(apollo->tcf.registers +
+ TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+
+ switch (val) {
+ default:
+ dev_err(&apollo->pdev->dev,
+ "Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n",
+ val);
+ /* Fall-through */
+ case 5:
+ dev_err(&apollo->pdev->dev, "Looks like a Hood ES2 TC\n");
+ return APOLLO_VERSION_TCF_2;
+ case 1:
+ dev_err(&apollo->pdev->dev, "Looks like a TCF5\n");
+ return APOLLO_VERSION_TCF_5;
+ case 6:
+ dev_err(&apollo->pdev->dev, "Looks like a Bonnie TC\n");
+ return APOLLO_VERSION_TCF_BONNIE;
+ }
+}
+
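+/* Reserve a sub-range of a PCI BAR and map it for register access. */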
+static int setup_io_region(struct pci_dev *pdev,
+ struct apollo_io_region *region, u32 index,
+ resource_size_t offset, resource_size_t size)
+{
+ int err;
+ resource_size_t pci_phys_addr;
+
+ err = request_pci_io_addr(pdev, index, offset, size);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request apollo registers (err=%d)\n", err);
+ return -EIO;
+ }
+ pci_phys_addr = pci_resource_start(pdev, index);
+ region->region.base = pci_phys_addr + offset;
+ region->region.size = size;
+
+ region->registers
+ = ioremap_nocache(region->region.base, region->region.size);
+
+ if (!region->registers) {
+ dev_err(&pdev->dev, "Failed to map apollo registers\n");
+ release_pci_io_addr(pdev, index,
+ region->region.base, region->region.size);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+static int odin_dev_init(struct apollo_device *apollo, struct pci_dev *pdev)
+{
+ int err;
+ u32 val;
+
+ apollo->version = ODIN_VERSION_TCF_BONNIE;
+ apollo->pdev = pdev;
+
+ spin_lock_init(&apollo->interrupt_handler_lock);
+ spin_lock_init(&apollo->interrupt_enable_lock);
+
+ /* Reserve and map the tcf system registers */
+ err = setup_io_region(pdev, &apollo->tcf,
+ ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE);
+
+ if (err)
+ goto err_out;
+
+ /* Setup card memory */
+ apollo->apollo_mem.base = pci_resource_start(pdev, ODN_DDR_BAR);
+ apollo->apollo_mem.size = pci_resource_len(pdev, ODN_DDR_BAR);
+
+ if (apollo->apollo_mem.size < apollo_pdp_mem_size) {
+ dev_err(&pdev->dev,
+ "Apollo MEM region (bar 4) has size of %lu which is smaller than the requested PDP heap of %lu",
+ (unsigned long)apollo->apollo_mem.size,
+ (unsigned long)apollo_pdp_mem_size);
+
+ err = -EIO;
+ goto err_odin_unmap_sys_registers;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ /* enable write combining */
+ apollo->mtrr = arch_phys_wc_add(apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (apollo->mtrr < 0) {
+ err = -ENODEV;
+ goto err_odin_unmap_sys_registers;
+ }
+#elif defined(CONFIG_MTRR)
+ /* enable mtrr region caching */
+ apollo->mtrr = mtrr_setup(pdev, apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (apollo->mtrr == -2) {
+ err = -ENODEV;
+ goto err_odin_unmap_sys_registers;
+ }
+#endif
+
+ /* Setup ranges for the device heaps */
+ apollo->pdp_heap_mem_size = apollo_pdp_mem_size;
+
+ /* We know ext_heap_mem_size won't underflow as we've compared
+ * apollo_mem.size against the apollo_pdp_mem_size value earlier
+ */
+ apollo->ext_heap_mem_size =
+ apollo->apollo_mem.size - apollo->pdp_heap_mem_size;
+
+ if (apollo->ext_heap_mem_size < APOLLO_EXT_MINIMUM_MEM_SIZE) {
+ dev_warn(&pdev->dev,
+ "Apollo MEM region (bar 4) has size of %lu, with %lu apollo_pdp_mem_size only %lu bytes are left for ext device, which looks too small",
+ (unsigned long)apollo->apollo_mem.size,
+ (unsigned long)apollo_pdp_mem_size,
+ (unsigned long)apollo->ext_heap_mem_size);
+ /* Continue as this is only a 'helpful warning' not a hard
+ * requirement
+ */
+ }
+ apollo->ext_heap_mem_base = apollo->apollo_mem.base;
+ apollo->pdp_heap_mem_base =
+ apollo->apollo_mem.base + apollo->ext_heap_mem_size;
+
+#if defined(SUPPORT_ION)
+ err = apollo_ion_init(apollo, ODN_DDR_BAR);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialise ION\n");
+ goto err_odin_unmap_sys_registers;
+ }
+ dev_info(&pdev->dev, "apollo_ion_init succeeded\n");
+#endif
+
+ val = ioread32(apollo->tcf.registers + ODN_CORE_REVISION);
+ dev_info(&pdev->dev, "ODN_CORE_REVISION = %08x\n", val);
+
+ val = ioread32(apollo->tcf.registers + ODN_CORE_CHANGE_SET);
+ dev_info(&pdev->dev, "ODN_CORE_CHANGE_SET = %08x\n", val);
+
+ val = ioread32(apollo->tcf.registers + ODN_CORE_USER_ID);
+ dev_info(&pdev->dev, "ODN_CORE_USER_ID = %08x\n", val);
+
+ val = ioread32(apollo->tcf.registers + ODN_CORE_USER_BUILD);
+ dev_info(&pdev->dev, "ODN_CORE_USER_BUILD = %08x\n", val);
+
+err_out:
+ return err;
+
+err_odin_unmap_sys_registers:
+ dev_info(&pdev->dev,
+ "odin_dev_init failed. unmapping the io regions.\n");
+
+ iounmap(apollo->tcf.registers);
+ release_pci_io_addr(pdev, ODN_SYS_BAR,
+ apollo->tcf.region.base, apollo->tcf.region.size);
+ goto err_out;
+}
+
+static int apollo_dev_init(struct apollo_device *apollo, struct pci_dev *pdev)
+{
+ int err;
+
+ apollo->pdev = pdev;
+
+ spin_lock_init(&apollo->interrupt_handler_lock);
+ spin_lock_init(&apollo->interrupt_enable_lock);
+
+ /* Reserve and map the tcf_clk / "sys" registers */
+ err = setup_io_region(pdev, &apollo->tcf,
+ SYS_APOLLO_REG_PCI_BASENUM,
+ SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE);
+ if (err)
+ goto err_out;
+
+ /* Reserve and map the tcf_pll registers */
+ err = setup_io_region(pdev, &apollo->tcf_pll,
+ SYS_APOLLO_REG_PCI_BASENUM,
+ SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0,
+ TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4);
+ if (err)
+ goto err_unmap_sys_registers;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+#define FPGA_REGISTERS_SIZE 4
+ /* If this is a special 'fpga' build, have the apollo driver manage
+ * the second register bar.
+ */
+ err = setup_io_region(pdev, &apollo->fpga,
+ SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE);
+ if (err)
+ goto err_unmap_pll_registers;
+#endif
+
+ /* Detect testchip version */
+ apollo->version = apollo_detect_tc_version(apollo);
+
+ /* Setup card memory */
+ apollo->apollo_mem.base =
+ pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM);
+ apollo->apollo_mem.size =
+ pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM);
+
+ if (apollo->apollo_mem.size < apollo_pdp_mem_size) {
+ dev_err(&pdev->dev,
+ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu",
+ APOLLO_MEM_PCI_BASENUM,
+ (unsigned long)apollo->apollo_mem.size,
+ (unsigned long)apollo_pdp_mem_size);
+ err = -EIO;
+ goto err_unmap_fpga_registers;
+ }
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ if (apollo->apollo_mem.size < (apollo_pdp_mem_size + apollo_secure_mem_size)) {
+ dev_err(&pdev->dev,
+ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu",
+ APOLLO_MEM_PCI_BASENUM,
+ (unsigned long)apollo->apollo_mem.size,
+ (unsigned long)apollo_pdp_mem_size,
+ (unsigned long)apollo_secure_mem_size);
+ err = -EIO;
+ goto err_unmap_fpga_registers;
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ apollo->mtrr = arch_phys_wc_add(apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (apollo->mtrr < 0) {
+ err = -ENODEV;
+ goto err_unmap_fpga_registers;
+ }
+#elif defined(CONFIG_MTRR)
+ apollo->mtrr = mtrr_setup(pdev, apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (apollo->mtrr == -2) {
+ err = -ENODEV;
+ goto err_unmap_fpga_registers;
+ }
+#endif
+
+ /* Setup ranges for the device heaps */
+ apollo->pdp_heap_mem_size = apollo_pdp_mem_size;
+
+ /* We know ext_heap_mem_size won't underflow as we've compared
+ * apollo_mem.size against the apollo_pdp_mem_size value earlier
+ */
+ apollo->ext_heap_mem_size =
+ apollo->apollo_mem.size - apollo->pdp_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ apollo->ext_heap_mem_size -= apollo_secure_mem_size;
+#endif
+
+ if (apollo->ext_heap_mem_size < APOLLO_EXT_MINIMUM_MEM_SIZE) {
+ dev_warn(&pdev->dev,
+ "Apollo MEM region (bar %d) has size of %lu, with %lu apollo_pdp_mem_size only %lu bytes are left for ext device, which looks too small",
+ APOLLO_MEM_PCI_BASENUM,
+ (unsigned long)apollo->apollo_mem.size,
+ (unsigned long)apollo_pdp_mem_size,
+ (unsigned long)apollo->ext_heap_mem_size);
+ /* Continue as this is only a 'helpful warning' not a hard
+ * requirement
+ */
+ }
+
+ apollo->ext_heap_mem_base = apollo->apollo_mem.base;
+ apollo->pdp_heap_mem_base =
+ apollo->apollo_mem.base + apollo->ext_heap_mem_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ apollo->secure_heap_mem_base = apollo->pdp_heap_mem_base +
+ apollo->pdp_heap_mem_size;
+ apollo->secure_heap_mem_size = apollo_secure_mem_size;
+#endif
+
+#if defined(SUPPORT_ION)
+ err = apollo_ion_init(apollo, APOLLO_MEM_PCI_BASENUM);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialise ION\n");
+ goto err_unmap_fpga_registers;
+ }
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA) || defined(SUPPORT_RGX)
+ apollo->debugfs_apollo_dir = debugfs_create_dir("apollo", NULL);
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA)
+ apollo->debugfs_apollo_regs =
+ debugfs_create_file_size("apollo-regs", S_IRUGO,
+ apollo->debugfs_apollo_dir, &apollo->tcf,
+ &apollo_io_debugfs_fops, apollo->tcf.region.size);
+ apollo->debugfs_apollo_pll_regs =
+ debugfs_create_file_size("apollo-pll-regs", S_IRUGO,
+ apollo->debugfs_apollo_dir, &apollo->tcf_pll,
+ &apollo_io_debugfs_fops, apollo->tcf_pll.region.size);
+ apollo->debugfs_fpga_regs =
+ debugfs_create_file_size("fpga-regs", S_IRUGO,
+ apollo->debugfs_apollo_dir, &apollo->fpga,
+ &apollo_io_debugfs_fops, apollo->fpga.region.size);
+ apollo->debugfs_apollo_mem =
+ debugfs_create_file_size("apollo-mem", S_IRUGO,
+ apollo->debugfs_apollo_dir, &apollo->apollo_mem,
+ &apollo_mem_debugfs_fops, apollo->apollo_mem.size);
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+#if defined(SUPPORT_RGX)
+ apollo->debugfs_rogue_name =
+ debugfs_create_blob("rogue-name", S_IRUGO,
+ apollo->debugfs_apollo_dir,
+ &apollo_debugfs_rogue_name_blobs[apollo->version]);
+#endif /* defined(SUPPORT_RGX) */
+
+err_out:
+ return err;
+err_unmap_fpga_registers:
+#if defined(SUPPORT_APOLLO_FPGA)
+ iounmap(apollo->fpga.registers);
+ release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM,
+ apollo->fpga.region.base, apollo->fpga.region.size);
+err_unmap_pll_registers:
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+ iounmap(apollo->tcf_pll.registers);
+ release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+ apollo->tcf_pll.region.base, apollo->tcf_pll.region.size);
+err_unmap_sys_registers:
+ iounmap(apollo->tcf.registers);
+ release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+ apollo->tcf.region.base, apollo->tcf.region.size);
+ goto err_out;
+}
+
+static void apollo_dev_cleanup(struct apollo_device *apollo)
+{
+#if defined(SUPPORT_RGX)
+ debugfs_remove(apollo->debugfs_rogue_name);
+#endif
+#if defined(SUPPORT_APOLLO_FPGA)
+ debugfs_remove(apollo->debugfs_apollo_mem);
+ debugfs_remove(apollo->debugfs_fpga_regs);
+ debugfs_remove(apollo->debugfs_apollo_pll_regs);
+ debugfs_remove(apollo->debugfs_apollo_regs);
+#endif
+#if defined(SUPPORT_APOLLO_FPGA) || defined(SUPPORT_RGX)
+ debugfs_remove(apollo->debugfs_apollo_dir);
+#endif
+
+#if defined(SUPPORT_ION)
+ apollo_ion_deinit(apollo, APOLLO_MEM_PCI_BASENUM);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ if (apollo->mtrr >= 0)
+ arch_phys_wc_del(apollo->mtrr);
+#elif defined(CONFIG_MTRR)
+ if (apollo->mtrr >= 0) {
+ int err;
+
+ err = mtrr_del(apollo->mtrr, apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (err < 0)
+ dev_err(&apollo->pdev->dev,
+ "%d - %s: mtrr_del failed (%d)\n",
+ __LINE__, __func__, err);
+ }
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA)
+ iounmap(apollo->fpga.registers);
+ release_pci_io_addr(apollo->pdev, SYS_RGX_REG_PCI_BASENUM,
+ apollo->fpga.region.base, apollo->fpga.region.size);
+#endif
+
+ iounmap(apollo->tcf_pll.registers);
+ release_pci_io_addr(apollo->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+ apollo->tcf_pll.region.base, apollo->tcf_pll.region.size);
+
+ iounmap(apollo->tcf.registers);
+ release_pci_io_addr(apollo->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+ apollo->tcf.region.base, apollo->tcf.region.size);
+}
+
+
+static void odin_dev_cleanup(struct apollo_device *apollo)
+{
+#if defined(SUPPORT_ION)
+ apollo_ion_deinit(apollo, ODN_DDR_BAR);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ if (apollo->mtrr >= 0)
+ arch_phys_wc_del(apollo->mtrr);
+#elif defined(CONFIG_MTRR)
+ if (apollo->mtrr >= 0) {
+ int err;
+
+ err = mtrr_del(apollo->mtrr, apollo->apollo_mem.base,
+ apollo->apollo_mem.size);
+ if (err < 0)
+ dev_err(&apollo->pdev->dev,
+ "%d - %s: mtrr_del failed (%d)\n",
+ __LINE__, __func__, err);
+ }
+#endif
+
+ dev_info(&apollo->pdev->dev,
+ "odin_dev_cleanup - unmapping the odin system io region\n");
+
+ iounmap(apollo->tcf.registers);
+
+ release_pci_io_addr(apollo->pdev,
+ ODN_SYS_BAR,
+ apollo->tcf.region.base,
+ apollo->tcf.region.size);
+
+}
+
+
+static int apollo_init(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct apollo_device *apollo;
+ int err = 0;
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ apollo = devres_alloc(apollo_devres_release,
+ sizeof(*apollo), GFP_KERNEL);
+ if (!apollo) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ devres_add(&pdev->dev, apollo);
+
+ err = pci_enable_device(pdev);
+
+ if (err) {
+ dev_err(&pdev->dev,
+ "error - pci_enable_device returned %d\n", err);
+ goto err_out;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_ODIN
+ && pdev->device == DEVICE_ID_ODIN) {
+
+ dev_info(&pdev->dev, "PCI_VENDOR_ID_ODIN DEVICE_ID_ODIN\n");
+
+ /* The device is an Odin */
+ apollo->odin = true;
+
+ err = odin_dev_init(apollo, pdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "odin_dev_init failed\n");
+ goto err_disable_device;
+ }
+
+ err = odin_hw_init(apollo);
+
+ if (!err) {
+ err = odin_enable_irq(apollo);
+
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialise IRQ\n");
+ } else {
+ dev_info(&pdev->dev,
+ "odin_enable_irq succeeded\n");
+ }
+ }
+
+ if (err) {
+ odin_dev_cleanup(apollo);
+ goto err_disable_device;
+ }
+ } else {
+ apollo->odin = false;
+
+ err = apollo_dev_init(apollo, pdev);
+ if (err)
+ goto err_disable_device;
+
+ err = apollo_hw_init(apollo);
+ if (err)
+ goto err_dev_cleanup;
+
+ err = apollo_enable_irq(apollo);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialise IRQ\n");
+ goto err_dev_cleanup;
+ }
+ }
+
+#if defined(APOLLO_FAKE_INTERRUPTS)
+ dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms",
+ FAKE_INTERRUPT_TIME_MS);
+#endif
+
+ /* Register the ext and pdp platform devices. Failures here are not
+ * treated as fatal.
+ */
+ register_pdp_device(apollo);
+ register_ext_device(apollo);
+
+ devres_remove_group(&pdev->dev, NULL);
+ goto apollo_init_return;
+
+err_dev_cleanup:
+ apollo_dev_cleanup(apollo);
+err_disable_device:
+ pci_disable_device(pdev);
+err_out:
+ devres_release_group(&pdev->dev, NULL);
+
+apollo_init_return:
+ if (err)
+ dev_err(&pdev->dev, "apollo_init failed\n");
+
+ return err;
+}
+
+static void apollo_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct apollo_device *apollo;
+
+ dev_err(&pdev->dev, "apollo_exit\n");
+
+ apollo = devres_find(&pdev->dev, apollo_devres_release, NULL, NULL);
+
+ if (!apollo)
+ goto apollo_exit_end;
+
+ if (apollo->thermal_zone)
+ thermal_zone_device_unregister(apollo->thermal_zone);
+
+ if (apollo->pdp_dev) {
+ dev_info(&pdev->dev, "platform_device_unregister pdp_dev\n");
+ platform_device_unregister(apollo->pdp_dev);
+ }
+
+ if (apollo->ext_dev) {
+ dev_info(&pdev->dev, "platform_device_unregister ext_dev\n");
+ platform_device_unregister(apollo->ext_dev);
+ }
+
+ if (apollo->odin) {
+ odin_disable_irq(apollo);
+ odin_dev_cleanup(apollo);
+ } else {
+ for (i = 0; i < APOLLO_INTERRUPT_COUNT; i++)
+ apollo_disable_interrupt(&pdev->dev, i);
+
+ apollo_disable_irq(apollo);
+ apollo_dev_cleanup(apollo);
+ }
+
+ dev_info(&pdev->dev, "pci_disable_device\n");
+ pci_disable_device(pdev);
+
+apollo_exit_end:
+ dev_info(&pdev->dev, "end of apollo_exit\n");
+}
+
+
+static const struct pci_device_id apollo_pci_tbl[] = {
+ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) },
+ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) },
+ { PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, apollo_pci_tbl);
+
+static struct pci_driver apollo_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = apollo_pci_tbl,
+ .probe = apollo_init,
+ .remove = apollo_exit,
+};
+
+module_pci_driver(apollo_pci_driver);
+
+static u32 apollo_interrupt_id_to_flag(int interrupt_id)
+{
+ switch (interrupt_id) {
+ case APOLLO_INTERRUPT_PDP:
+ return APOLLO_INTERRUPT_FLAG_PDP;
+ case APOLLO_INTERRUPT_EXT:
+ return APOLLO_INTERRUPT_FLAG_EXT;
+ default:
+ BUG();
+ }
+}
+
+static void apollo_enable_interrupt_register(struct apollo_device *apollo,
+ int interrupt_id)
+{
+ u32 val;
+
+ if (interrupt_id == APOLLO_INTERRUPT_PDP ||
+ interrupt_id == APOLLO_INTERRUPT_EXT) {
+ val = ioread32(
+ apollo->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ val |= apollo_interrupt_id_to_flag(interrupt_id);
+ iowrite32(val,
+ apollo->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ }
+}
+
+static void apollo_disable_interrupt_register(struct apollo_device *apollo,
+ int interrupt_id)
+{
+ u32 val;
+
+ if (interrupt_id == APOLLO_INTERRUPT_PDP ||
+ interrupt_id == APOLLO_INTERRUPT_EXT) {
+ val = ioread32(
+ apollo->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ val &= ~(apollo_interrupt_id_to_flag(interrupt_id));
+ iowrite32(val,
+ apollo->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+ }
+}
+
+static u32 odin_interrupt_id_to_flag(int interrupt_id)
+{
+ switch (interrupt_id) {
+ case APOLLO_INTERRUPT_PDP:
+ return ODN_INTERRUPT_ENABLE_PDP1;
+ case APOLLO_INTERRUPT_EXT:
+ return ODN_INTERRUPT_ENABLE_DUT;
+ default:
+ BUG();
+ }
+}
+
+static void odin_enable_interrupt_register(struct apollo_device *apollo,
+ int interrupt_id)
+{
+ u32 val;
+ u32 flag;
+
+ dev_info(&apollo->pdev->dev, "odin_enable_interrupt_register\n");
+
+ switch (interrupt_id) {
+ case APOLLO_INTERRUPT_PDP:
+ dev_info(&apollo->pdev->dev,
+ "Enabling Odin PDP interrupts\n");
+ break;
+ case APOLLO_INTERRUPT_EXT:
+ dev_info(&apollo->pdev->dev,
+ "Enabling Odin DUT interrupts\n");
+ break;
+ default:
+ dev_err(&apollo->pdev->dev,
+ "Error - illegal interrupt id\n");
+ return;
+ }
+
+ val = ioread32(apollo->tcf.registers
+ + ODN_CORE_INTERRUPT_ENABLE);
+ flag = odin_interrupt_id_to_flag(interrupt_id);
+ val |= flag;
+ iowrite32(val, apollo->tcf.registers
+ + ODN_CORE_INTERRUPT_ENABLE);
+}
+
+static void odin_disable_interrupt_register(struct apollo_device *apollo,
+ int interrupt_id)
+{
+ u32 val;
+
+ dev_info(&apollo->pdev->dev, "odin_disable_interrupt_register\n");
+
+ switch (interrupt_id) {
+ case APOLLO_INTERRUPT_PDP:
+ dev_info(&apollo->pdev->dev,
+ "Disabling Odin PDP interrupts\n");
+ break;
+ case APOLLO_INTERRUPT_EXT:
+ dev_info(&apollo->pdev->dev,
+ "Disabling Odin DUT interrupts\n");
+ break;
+ default:
+ dev_err(&apollo->pdev->dev,
+ "Error - illegal interrupt id\n");
+ return;
+ }
+ val = ioread32(apollo->tcf.registers
+ + ODN_CORE_INTERRUPT_ENABLE);
+ val &= ~(odin_interrupt_id_to_flag(interrupt_id));
+ iowrite32(val, apollo->tcf.registers
+ + ODN_CORE_INTERRUPT_ENABLE);
+}
+
+int apollo_enable(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return pci_enable_device(pdev);
+}
+EXPORT_SYMBOL(apollo_enable);
+
+void apollo_disable(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(apollo_disable);
+
+int apollo_set_interrupt_handler(struct device *dev, int interrupt_id,
+ void (*handler_function)(void *), void *data)
+{
+ struct apollo_device *apollo = devres_find(dev, apollo_devres_release,
+ NULL, NULL);
+ int err = 0;
+ unsigned long flags;
+
+ if (!apollo) {
+ dev_err(dev, "No apollo device resources found\n");
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ if (interrupt_id < 0 || interrupt_id >= APOLLO_INTERRUPT_COUNT) {
+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ spin_lock_irqsave(&apollo->interrupt_handler_lock, flags);
+
+ apollo->interrupt_handlers[interrupt_id].handler_function =
+ handler_function;
+ apollo->interrupt_handlers[interrupt_id].handler_data = data;
+
+ spin_unlock_irqrestore(&apollo->interrupt_handler_lock, flags);
+
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(apollo_set_interrupt_handler);
+
+int apollo_enable_interrupt(struct device *dev, int interrupt_id)
+{
+ struct apollo_device *apollo = devres_find(dev, apollo_devres_release,
+ NULL, NULL);
+ int err = 0;
+ unsigned long flags;
+
+ if (!apollo) {
+ dev_err(dev, "No apollo device resources found\n");
+ err = -ENODEV;
+ goto err_out;
+ }
+ if (interrupt_id < 0 || interrupt_id >= APOLLO_INTERRUPT_COUNT) {
+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+ spin_lock_irqsave(&apollo->interrupt_enable_lock, flags);
+
+ if (apollo->interrupt_handlers[interrupt_id].enabled) {
+ dev_warn(dev, "Interrupt ID %d already enabled\n",
+ interrupt_id);
+ err = -EEXIST;
+ goto err_unlock;
+ }
+ apollo->interrupt_handlers[interrupt_id].enabled = true;
+
+ if (apollo->odin)
+ odin_enable_interrupt_register(apollo, interrupt_id);
+ else
+ apollo_enable_interrupt_register(apollo, interrupt_id);
+
+err_unlock:
+ spin_unlock_irqrestore(&apollo->interrupt_enable_lock, flags);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(apollo_enable_interrupt);
+
+int apollo_disable_interrupt(struct device *dev, int interrupt_id)
+{
+ struct apollo_device *apollo = devres_find(dev, apollo_devres_release,
+ NULL, NULL);
+ int err = 0;
+ unsigned long flags;
+
+ if (!apollo) {
+ dev_err(dev, "No apollo device resources found\n");
+ err = -ENODEV;
+ goto err_out;
+ }
+ if (interrupt_id < 0 || interrupt_id >= APOLLO_INTERRUPT_COUNT) {
+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+ spin_lock_irqsave(&apollo->interrupt_enable_lock, flags);
+
+ if (!apollo->interrupt_handlers[interrupt_id].enabled) {
+ dev_warn(dev, "Interrupt ID %d already disabled\n",
+ interrupt_id);
+ }
+ apollo->interrupt_handlers[interrupt_id].enabled = false;
+
+ if (apollo->odin)
+ odin_disable_interrupt_register(apollo, interrupt_id);
+ else
+ apollo_disable_interrupt_register(apollo, interrupt_id);
+
+ spin_unlock_irqrestore(&apollo->interrupt_enable_lock, flags);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(apollo_disable_interrupt);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File apollo_drv.h
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _APOLLO_DRV_H
+#define _APOLLO_DRV_H
+
+/*
+ * This contains the hooks for the apollo testchip driver, as used by the
+ * Rogue and PDP sub-devices, and the platform data passed to each of their
+ * drivers
+ */
+
+#include <linux/pci.h>
+#include <linux/device.h>
+
+#if defined(SUPPORT_ION)
+
+#include PVR_ANDROID_ION_HEADER
+
+/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */
+#if defined(SUPPORT_RGX)
+#define ION_HEAP_APOLLO_ROGUE (ION_HEAP_TYPE_CUSTOM+1)
+#endif
+#define ION_HEAP_APOLLO_PDP (ION_HEAP_TYPE_CUSTOM+2)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+#define ION_HEAP_APOLLO_SECURE (ION_HEAP_TYPE_CUSTOM+3)
+#endif
+
+#endif /* defined(SUPPORT_ION) */
+
+#define APOLLO_INTERRUPT_PDP 0
+#define APOLLO_INTERRUPT_EXT 1
+#define APOLLO_INTERRUPT_TC5_PDP 2
+#define APOLLO_INTERRUPT_COUNT 3
+
+int apollo_enable(struct device *dev);
+void apollo_disable(struct device *dev);
+
+int apollo_enable_interrupt(struct device *dev, int interrupt_id);
+int apollo_disable_interrupt(struct device *dev, int interrupt_id);
+
+int apollo_set_interrupt_handler(struct device *dev, int interrupt_id,
+ void (*handler_function)(void *), void *handler_data);
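+
+/* Illustrative usage (a sketch, not part of this patch): a sub-device driver
+ * would typically register a handler and then enable the interrupt, e.g.
+ *
+ * apollo_set_interrupt_handler(dev, APOLLO_INTERRUPT_PDP,
+ * my_pdp_handler, my_priv);
+ * apollo_enable_interrupt(dev, APOLLO_INTERRUPT_PDP);
+ *
+ * where "dev" must be the apollo PCI device (these helpers look the driver
+ * data up via devres on that device), and "my_pdp_handler"/"my_priv" are
+ * hypothetical names.
+ */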
+
+int apollo_sys_info(struct device *dev, u32 *tmp, u32 *pll);
+int apollo_sys_strings(struct device *dev,
+ char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev,
+ size_t size_tcf_core_rev, char *str_tcf_core_target_build_id,
+ size_t size_tcf_core_target_build_id, char *str_pci_ver,
+ size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver);
+int apollo_core_clock_speed(struct device *dev);
+
+#define APOLLO_DEVICE_NAME_PDP "apollo_pdp"
+
+#define ODN_DEVICE_NAME_PDP "odin_pdp"
+
+/* The following structs are initialised and passed down by the parent apollo
+ * driver to the respective sub-drivers
+ */
+
+struct apollo_pdp_platform_data {
+#if defined(SUPPORT_ION)
+ struct ion_device *ion_device;
+ int ion_heap_id;
+#endif
+ resource_size_t memory_base;
+
+ /* The following is used by the drm_pdp driver as it manages the
+ * pdp memory
+ */
+ resource_size_t pdp_heap_memory_base;
+ resource_size_t pdp_heap_memory_size;
+};
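+
+/* A minimal sketch (not part of this patch) of how a sub-driver might fetch
+ * this structure in its probe routine, assuming the parent attaches it as
+ * platform data when registering the platform device:
+ *
+ * struct apollo_pdp_platform_data *pdata =
+ * dev_get_platdata(&pdev->dev);
+ * if (!pdata)
+ * return -EINVAL;
+ */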
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA)
+#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FPGA, not both
+#endif
+
+#if defined(SUPPORT_RGX)
+
+#define APOLLO_DEVICE_NAME_ROGUE "apollo_rogue"
+
+struct apollo_rogue_platform_data {
+#if defined(SUPPORT_ION)
+ struct ion_device *ion_device;
+ int ion_heap_id;
+#endif
+
+ /* The base address of the testchip memory (CPU physical address) -
+ * used to convert from CPU-Physical to device-physical addresses
+ */
+ resource_size_t apollo_memory_base;
+
+ /* The following is used to setup the services heaps that map to the
+ * ion heaps
+ */
+ resource_size_t pdp_heap_memory_base;
+ resource_size_t pdp_heap_memory_size;
+ resource_size_t rogue_heap_memory_base;
+ resource_size_t rogue_heap_memory_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+ resource_size_t secure_heap_memory_base;
+ resource_size_t secure_heap_memory_size;
+#endif
+};
+
+#endif /* defined(SUPPORT_RGX) */
+
+#if defined(SUPPORT_APOLLO_FPGA)
+
+#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga"
+
+struct apollo_fpga_platform_data {
+ resource_size_t apollo_memory_base;
+
+ resource_size_t pdp_heap_memory_base;
+ resource_size_t pdp_heap_memory_size;
+};
+
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+#endif /* _APOLLO_DRV_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__APOLLO_REGS_H__)
+#define __APOLLO_REGS_H__
+
+/*
+ * The core clock speed is passed through a multiplier depending on the TC
+ * version:
+ *
+ * On TC_ES1: multiplier = x3, final speed = 270MHz
+ * On TC_ES2: multiplier = x6, final speed = 540MHz
+ * On TCF5:   multiplier = x1, final speed = 45MHz
+ *
+ * The base (unmultiplied) speed can be adjusted with the "sys_core_clk_speed"
+ * module parameter, given in Hz. For example:
+ *
+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
+ *
+ * results in a core clock speed of 60MHz x multiplier.
+ *
+ * The memory clock is not multiplied and can be adjusted with the
+ * "sys_mem_clk_speed" module parameter, also given in Hz. For example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
+ *
+ * attempts to start the driver with the memory clock speed set to 100MHz.
+ *
+ * The same applies to the system interface clock speed, set with the
+ * "sys_sysif_clk_speed" parameter. This is needed for TCF5 but not for
+ * TC_ES2/ES1. For example:
+ *
+ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
+ *
+ * attempts to start the driver with the system interface clock speed set to
+ * 45MHz.
+ *
+ * All parameters can be specified at once, e.g.:
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
+ */
+
+#define RGX_TC_SYS_CLOCK_SPEED (50000000) /* Currently only used for TCF5 */
+
+#if defined(TC_APOLLO_TCF5_12_4_1_48)
+ /* TC TCF5 (12.*) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED (60000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED (45000000)
+#elif defined(TC_APOLLO_TCF5_14_8_1_20) || defined(TC_APOLLO_TCF5_22_18_22_22) || \
+ defined(TC_APOLLO_TCF5_22_34_22_23) || defined(TC_APOLLO_TCF5_22_44_22_25) || \
+ defined(TC_APOLLO_TCF5_22_45_22_29) || defined(TC_APOLLO_TCF5_22_49_21_16) || \
+ defined(TC_APOLLO_TCF5_22_50_22_29)
+ /* TC TCF5 (14.* / 22.*) */
+ #define RGX_TC_CORE_CLOCK_SPEED (20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_22_19_54_24) || defined(TC_APOLLO_TCF5_22_30_54_25) || \
+ defined(TC_APOLLO_TCF5_22_36_54_28) || defined(TC_APOLLO_TCF5_22_40_54_30) || \
+ defined(TC_APOLLO_TCF5_22_48_54_30)
+ /* TC TCF5 (22.*) */
+ #define RGX_TC_CORE_CLOCK_SPEED (100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_22_26_54_24)
+ /* TC TCF5 (22.*) */
+ #define RGX_TC_CORE_CLOCK_SPEED (13000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_22_32_54_328) || defined(TC_APOLLO_TCF5_22_46_54_330)
+ /* TC TCF5 (22.*) */
+ #define RGX_TC_CORE_CLOCK_SPEED (50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_22_33_21_11)
+ /* TC TCF5 (22.*) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED (20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED (45000000)
+#elif defined(TC_APOLLO_TCF5_22_41_54_330)
+ /* TC TCF5 (22.*) */
+ #define RGX_TC_CORE_CLOCK_SPEED (80000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED)
+ /* TC TCF5 (22.*) fallback frequencies */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED (20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED (50000000)
+#elif defined(TC_APOLLO_TCF5_REFERENCE)
+ /* TC TCF5 (Reference bitfile) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED (50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED (45000000)
+#elif defined(TC_APOLLO_BONNIE)
+ /* TC Bonnie */
+ #define RGX_TC_CORE_CLOCK_SPEED (18000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (65000000)
+#elif defined(TC_APOLLO_ES2)
+ /* TC ES2 */
+ #define RGX_TC_CORE_CLOCK_SPEED (90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (104000000)
+#else
+ /* TC ES1 */
+ #define RGX_TC_CORE_CLOCK_SPEED (90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED (65000000)
+#endif
+
+/* TC TCF5 */
+#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1)
+#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000)
+#define TC5_SYS_APOLLO_REG_PDP2_SIZE (0x7C4)
+
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000)
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE (0x14)
+
+#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000)
+#define TC5_SYS_APOLLO_REG_HDMI_SIZE (0x1C)
+
+/* TC ES2 */
+#define TCF_TEMP_SENSOR_SPI_OFFSET 0xe
+#define TCF_TEMP_SENSOR_TO_C(raw) (((raw) * 248 / 4096) - 54)
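+/* e.g. a raw sensor reading of 0x800 (2048) converts to
+ * (2048 * 248 / 4096) - 54 = 70 degrees C.
+ */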
+
+/* Number of bytes that are broken */
+#define SYS_DEV_MEM_BROKEN_BYTES (1024 * 1024)
+#define SYS_DEV_MEM_REGION_SIZE (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES)
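+/* i.e. 1GB (0x40000000) of device memory, less the broken bytes above */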
+
+/* Apollo reg on base register 0 */
+#define SYS_APOLLO_REG_PCI_BASENUM (0)
+#define SYS_APOLLO_REG_REGION_SIZE (0x00010000)
+
+#define SYS_APOLLO_REG_SYS_OFFSET (0x0000)
+#define SYS_APOLLO_REG_SYS_SIZE (0x0400)
+
+#define SYS_APOLLO_REG_PLL_OFFSET (0x1000)
+#define SYS_APOLLO_REG_PLL_SIZE (0x0400)
+
+#define SYS_APOLLO_REG_HOST_OFFSET (0x4050)
+#define SYS_APOLLO_REG_HOST_SIZE (0x0014)
+
+#define SYS_APOLLO_REG_PDP1_OFFSET (0xC000)
+#define SYS_APOLLO_REG_PDP1_SIZE (0x2000)
+
+/* Offsets for flashing Apollo PROMs from base 0 */
+#define APOLLO_FLASH_STAT_OFFSET (0x4058)
+#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050)
+#define APOLLO_FLASH_RESET_OFFSET (0x4060)
+
+#define APOLLO_FLASH_FIFO_STATUS_MASK (0xF)
+#define APOLLO_FLASH_FIFO_STATUS_SHIFT (0)
+#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF)
+#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16)
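+/* The two status fields appear to be read from APOLLO_FLASH_STAT_OFFSET and
+ * extracted as (value >> *_SHIFT) & *_MASK, e.g. program status =
+ * (val >> APOLLO_FLASH_PROGRAM_STATUS_SHIFT) & APOLLO_FLASH_PROGRAM_STATUS_MASK
+ */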
+
+#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1)
+#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2)
+#define APOLLO_FLASH_PROG_FAILED_BIT (0x4)
+#define APOLLO_FLASH_INV_FILETYPE_BIT (0x8)
+
+#define APOLLO_FLASH_FIFO_SIZE (8)
+
+/* RGX reg on base register 1 */
+#define SYS_RGX_REG_PCI_BASENUM (1)
+#define SYS_RGX_REG_REGION_SIZE (0x7FFFF)
+
+/* Device memory (including HP mapping) on base register 2 */
+#define SYS_DEV_MEM_PCI_BASENUM (2)
+
+#endif /* if !defined(__APOLLO_REGS_H__) */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File ion_lma_heap.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "ion_lma_heap.h"
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+#include <linux/scatterlist.h>
+
+/* Ion heap for LMA allocations. This heap is identical to CARVEOUT except
+ * that it does not do any CPU cache maintenance nor does it zero the memory
+ * using the CPU (this is handled with PVR_ANDROID_DEFER_CLEAR in userspace).
+ */
+
+struct ion_lma_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ bool allow_cpu_map;
+};
+
+static ion_phys_addr_t ion_lma_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_lma_heap *lma_heap =
+ container_of(heap, struct ion_lma_heap, heap);
+ unsigned long offset = gen_pool_alloc(lma_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+static void ion_lma_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_lma_heap *lma_heap =
+ container_of(heap, struct ion_lma_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+
+ gen_pool_free(lma_heap->pool, addr, size);
+}
+
+static int ion_lma_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_lma_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct sg_table *table;
+ ion_phys_addr_t paddr;
+ int ret;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_lma_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
+ }
+
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
+}
+
+static void ion_lma_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ /* Do not zero the LMA heap from the CPU. This is very slow with
+ * the current TCF (w/ no DMA engine). We will use the TLA to clear
+ * the memory with Rogue in another place.
+ *
+ * We also skip the CPU cache maintenance for the heap space, as we
+ * statically know that the TCF PCI memory bar has UC/WC set by the
+ * MTRR/PAT subsystem.
+ */
+
+ ion_lma_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_lma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_lma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ /* No-op */
+}
+
+static int ion_lma_heap_map_user(struct ion_heap *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+ struct ion_lma_heap *lma_heap =
+ container_of(mapper, struct ion_lma_heap, heap);
+
+ if (!lma_heap->allow_cpu_map) {
+ pr_err("Trying to map_user fake secure ION handle\n");
+ return -EPERM;
+ }
+
+ return remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(paddr) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ pgprot_writecombine(vma->vm_page_prot));
+}
+
+static void *ion_lma_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+ struct ion_lma_heap *lma_heap =
+ container_of(heap, struct ion_lma_heap, heap);
+
+ if (!lma_heap->allow_cpu_map) {
+ pr_err("Trying to map_kernel fake secure ION handle\n");
+ return ERR_PTR(-EPERM);
+ }
+
+ return ioremap_wc(paddr, buffer->size);
+}
+
+static void ion_lma_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ iounmap(buffer->vaddr);
+}
+
+static struct ion_heap_ops lma_heap_ops = {
+ .allocate = ion_lma_heap_allocate,
+ .free = ion_lma_heap_free,
+ .phys = ion_lma_heap_phys,
+ .map_dma = ion_lma_heap_map_dma,
+ .unmap_dma = ion_lma_heap_unmap_dma,
+ .map_user = ion_lma_heap_map_user,
+ .map_kernel = ion_lma_heap_map_kernel,
+ .unmap_kernel = ion_lma_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_lma_heap_create(struct ion_platform_heap *heap_data,
+ bool allow_cpu_map)
+{
+ struct ion_lma_heap *lma_heap;
+ size_t size = heap_data->size;
+
+ /* Do not zero the LMA heap from the CPU. This is very slow with
+ * the current TCF (w/ no DMA engine). We will use the TLA to clear
+ * the memory with Rogue in another place.
+ *
+ * We also skip the CPU cache maintenance for the heap space, as we
+ * statically know that the TCF PCI memory bar has UC/WC set by the
+ * MTRR/PAT subsystem.
+ */
+
+ lma_heap = kzalloc(sizeof(*lma_heap), GFP_KERNEL);
+ if (!lma_heap)
+ return ERR_PTR(-ENOMEM);
+
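+ /* A minimum allocation order of 12 means the pool hands out
+ * allocations in 4KB (page sized) multiples.
+ */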
+ lma_heap->pool = gen_pool_create(12, -1);
+ if (!lma_heap->pool) {
+ kfree(lma_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lma_heap->base = heap_data->base;
+ gen_pool_add(lma_heap->pool, lma_heap->base, size, -1);
+
+ lma_heap->heap.id = heap_data->id;
+ lma_heap->heap.ops = &lma_heap_ops;
+ lma_heap->heap.name = heap_data->name;
+ lma_heap->heap.type = ION_HEAP_TYPE_CUSTOM;
+ lma_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+ lma_heap->allow_cpu_map = allow_cpu_map;
+
+ return &lma_heap->heap;
+}
+
+void ion_lma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_lma_heap *lma_heap =
+ container_of(heap, struct ion_lma_heap, heap);
+
+ gen_pool_destroy(lma_heap->pool);
+ kfree(lma_heap);
+}
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File ion_lma_heap.h
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __ION_LMA_HEAP__
+#define __ION_LMA_HEAP__
+
+#include PVR_ANDROID_ION_HEADER
+#include PVR_ANDROID_ION_PRIV_HEADER
+
+struct ion_heap *ion_lma_heap_create(struct ion_platform_heap *heap_data,
+ bool allow_cpu_map);
+void ion_lma_heap_destroy(struct ion_heap *heap);
+
+#endif /* __ION_LMA_HEAP__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File ion_support.c
+@Title Generic Ion support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file does the Ion initialisation and de-initialisation for
+ systems that don't already have Ion.
+ Systems that do have Ion are expected to initialise Ion as per
+ their requirements and then implement IonDevAcquire and
+ IonDevRelease, which provide access to the ion device.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "ion_support.h"
+#include "ion_sys.h"
+
+#include <linux/version.h>
+#include PVR_ANDROID_ION_HEADER
+#include PVR_ANDROID_ION_PRIV_HEADER
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* Just the system heaps are used by the generic implementation */
+static struct ion_platform_data generic_config = {
+ .nr = 2,
+ .heaps =
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,39))
+ (struct ion_platform_heap [])
+#endif
+ {
+ {
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "system_contig",
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ },
+ {
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ .id = ION_HEAP_TYPE_SYSTEM,
+ }
+ }
+};
+
+struct ion_heap **g_apsIonHeaps;
+struct ion_device *g_psIonDev;
+
+PVRSRV_ERROR IonInit(void *phPrivateData)
+{
+ int uiHeapCount = generic_config.nr;
+ int i;
+
+ PVR_UNREFERENCED_PARAMETER(phPrivateData);
+
+ g_apsIonHeaps = kcalloc(uiHeapCount, sizeof(struct ion_heap *),
+ GFP_KERNEL);
+ if (!g_apsIonHeaps)
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+ /* Create the ion devicenode */
+ g_psIonDev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(g_psIonDev)) {
+ kfree(g_apsIonHeaps);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Register all the heaps */
+ for (i = 0; i < generic_config.nr; i++)
+ {
+ struct ion_platform_heap *psPlatHeapData = &generic_config.heaps[i];
+
+ g_apsIonHeaps[i] = ion_heap_create(psPlatHeapData);
+ if (IS_ERR_OR_NULL(g_apsIonHeaps[i]))
+ {
+ g_apsIonHeaps[i] = NULL;
+ goto failHeapCreate;
+ }
+ ion_device_add_heap(g_psIonDev, g_apsIonHeaps[i]);
+ }
+
+ return PVRSRV_OK;
+
+failHeapCreate:
+ for (i = 0; i < uiHeapCount; i++) {
+ if (g_apsIonHeaps[i])
+ {
+ ion_heap_destroy(g_apsIonHeaps[i]);
+ }
+ }
+ kfree(g_apsIonHeaps);
+ ion_device_destroy(g_psIonDev);
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+ return g_psIonDev;
+}
+
+void IonDevRelease(struct ion_device *psIonDev)
+{
+ /* Nothing to do, sanity check the pointer we're passed back */
+ PVR_ASSERT(psIonDev == g_psIonDev);
+}
+
+void IonDeinit(void)
+{
+ int uiHeapCount = generic_config.nr;
+ int i;
+
+ for (i = 0; i < uiHeapCount; i++) {
+ if (g_apsIonHeaps[i])
+ {
+ ion_heap_destroy(g_apsIonHeaps[i]);
+ }
+ }
+ kfree(g_apsIonHeaps);
+ ion_device_destroy(g_psIonDev);
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/pci.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#include <linux/version.h>
+#endif
+
+#include "pci_support.h"
+#include "allocmem.h"
+
+typedef struct _PVR_PCI_DEV_TAG
+{
+ struct pci_dev *psPCIDev;
+ HOST_PCI_INIT_FLAGS ePCIFlags;
+ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+} PVR_PCI_DEV;
+
+/*************************************************************************/ /*!
+@Function OSPCISetDev
+@Description Set a PCI device for subsequent use.
+@Input pvPCICookie Pointer to OS specific PCI structure
+@Input eFlags Flags
+@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+ int err;
+ IMG_UINT32 i;
+ PVR_PCI_DEV *psPVRPCI;
+
+ psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
+ if (psPVRPCI == NULL)
+ {
+ printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
+ return NULL;
+ }
+
+ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+ psPVRPCI->ePCIFlags = eFlags;
+
+ err = pci_enable_device(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
+ OSFreeMem(psPVRPCI);
+ return NULL;
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
+ {
+ pci_set_master(psPVRPCI->psPCIDev);
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */
+ {
+#if defined(CONFIG_PCI_MSI)
+ err = pci_enable_msi(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
+ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */
+ }
+#else
+ printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
+#endif
+ }
+
+ /* Initialise the PCI resource tracking array */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+ }
+
+ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIAcquireDev
+@Description Acquire a PCI device for subsequent use.
+@Input ui16VendorID Vendor PCI ID
+@Input ui16DeviceID Device PCI ID
+@Input eFlags Flags
+@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID,
+ IMG_UINT16 ui16DeviceID,
+ HOST_PCI_INIT_FLAGS eFlags)
+{
+ struct pci_dev *psPCIDev;
+
+ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+ if (psPCIDev == NULL)
+ {
+ return NULL;
+ }
+
+ return OSPCISetDev((void *)psPCIDev, eFlags);
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIDevID
+@Description Get the PCI device ID.
+@Input hPVRPCI PCI device handle
+@Output pui16DeviceID Pointer to where the device ID should
+ be returned
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+ if (pui16DeviceID == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui16DeviceID = psPVRPCI->psPCIDev->device;
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIIRQ
+@Description Get the interrupt number for the device.
+@Input hPVRPCI PCI device handle
+@Output pui32IRQ Pointer to where the interrupt number
+ should be returned
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+ if (pui32IRQ == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+ return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+ HOST_PCI_ADDR_RANGE_FUNC_LEN,
+ HOST_PCI_ADDR_RANGE_FUNC_START,
+ HOST_PCI_ADDR_RANGE_FUNC_END,
+ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*************************************************************************/ /*!
+@Function OSPCIAddrRangeFunc
+@Description Internal support function for various address range related
+ functions
+@Input eFunc Function to perform
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return IMG_UINT64 Function dependent value
+*/ /**************************************************************************/
+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+ PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+ if (ui32Index >= DEVICE_COUNT_RESOURCE)
+ {
+ printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
+ return 0;
+ }
+
+ switch (eFunc)
+ {
+ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+ {
+ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+ }
+ case HOST_PCI_ADDR_RANGE_FUNC_START:
+ {
+ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+ }
+ case HOST_PCI_ADDR_RANGE_FUNC_END:
+ {
+ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+ }
+ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+ {
+ int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region failed (%d)\n", err);
+ return 0;
+ }
+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+ return 1;
+ }
+ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+ {
+ if (psPVRPCI->abPCIResourceInUse[ui32Index])
+ {
+ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+ }
+ return 1;
+ }
+ default:
+ {
+ printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIAddrRangeLen
+@Description Returns length of a given address range
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return IMG_UINT64 Length of address range or 0 if no
+ such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIAddrRangeStart
+@Description Returns the start of a given address range
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return IMG_UINT64 Start of address range or 0 if no
+ such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIAddrRangeEnd
+@Description Returns the end of a given address range
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return IMG_UINT64 End of address range or 0 if no such
+ range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIRequestAddrRange
+@Description Request a given address range index for subsequent use
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index)
+{
+ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0)
+ {
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+ else
+ {
+ return PVRSRV_OK;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIReleaseAddrRange
+@Description Release a given address range that is no longer being used
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0)
+ {
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+ else
+ {
+ return PVRSRV_OK;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIRequestAddrRegion
+@Description Request a given region from an address range for subsequent use
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Input uiOffset Offset into the address range that forms
+ the start of the region
+@Input uiLength Length of the region
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index,
+ IMG_UINT64 uiOffset,
+ IMG_UINT64 uiLength)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ resource_size_t start;
+ resource_size_t end;
+
+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+ /* Check that the requested region is valid */
+ if ((start + uiOffset + uiLength - 1) > end)
+ {
+ return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+ }
+
+ if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+ {
+ if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+ {
+ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+ }
+ }
+ else
+ {
+ if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+ {
+ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIReleaseAddrRegion
+@Description Release a given region, from an address range, that is no
+ longer in use
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Input uiOffset Offset into the address range that forms
+ the start of the region
+@Input uiLength Length of the region
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index,
+ IMG_UINT64 uiOffset,
+ IMG_UINT64 uiLength)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ resource_size_t start;
+ resource_size_t end;
+
+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+ /* Check that the region is valid */
+ if ((start + uiOffset + uiLength - 1) > end)
+ {
+ return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+ }
+
+ if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+ {
+ release_region(start + uiOffset, uiLength);
+ }
+ else
+ {
+ release_mem_region(start + uiOffset, uiLength);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIReleaseDev
+@Description Release a PCI device that is no longer being used
+@Input hPVRPCI PCI device handle
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int i;
+
+ /* Release all PCI regions that are currently in use */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ pci_release_region(psPVRPCI->psPCIDev, i);
+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+ }
+ }
+
+#if defined(CONFIG_PCI_MSI)
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */
+ {
+ pci_disable_msi(psPVRPCI->psPCIDev);
+ }
+#endif
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
+ {
+ pci_clear_master(psPVRPCI->psPCIDev);
+ }
+
+ pci_disable_device(psPVRPCI->psPCIDev);
+
+ OSFreeMem(psPVRPCI);
+ /* Not NULLing the pointer; the caller's handle is just a copy on its stack */
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCISuspendDev
+@Description Prepare PCI device to be turned off by power management
+@Input hPVRPCI PCI device handle
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int i;
+ int err;
+
+ /* Release all PCI regions that are currently in use */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ pci_release_region(psPVRPCI->psPCIDev, i);
+ }
+ }
+
+ err = pci_save_state(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCISuspendDev: pci_save_state failed (%d)\n", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ pci_disable_device(psPVRPCI->psPCIDev);
+
+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+ switch (err)
+ {
+ case 0:
+ break;
+ case -EIO:
+ printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
+ break;
+ case -EINVAL:
+ printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
+ break;
+ default:
+ printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
+ break;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSPCIResumeDev
+@Description Prepare a PCI device to be resumed by power management
+@Input hPVRPCI PCI device handle
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int err;
+ int i;
+
+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+ switch (err)
+ {
+ case 0:
+ break;
+ case -EIO:
+ printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
+ break;
+ case -EINVAL:
+ printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ default:
+ printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ }
+
+ pci_restore_state(psPVRPCI->psPCIDev);
+
+ err = pci_enable_device(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
+ pci_set_master(psPVRPCI->psPCIDev);
+
+ /* Restore the PCI resource tracking array */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+ if (err != 0)
+ {
+ printk(KERN_ERR "OSPCIResumeDev: pci_request_region failed (region %d, error %d)\n", i, err);
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined(CONFIG_MTRR)
+
+/*************************************************************************/ /*!
+@Function OSPCIClearResourceMTRRs
+@Description Clear any BIOS-configured MTRRs for a PCI memory region
+@Input hPVRPCI PCI device handle
+@Input ui32Index Address range index
+@Return PVRSRV_ERROR Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ resource_size_t start, end;
+ int err;
+
+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ err = arch_phys_wc_add(start, end - start);
+ if (err < 0)
+ {
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+#else
+
+ err = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
+ if (err < 0)
+ {
+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ err = mtrr_del(err, start, end - start);
+ if (err < 0)
+ {
+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ /* Workaround for overlapping MTRRs. */
+ {
+ IMG_BOOL bGotMTRR0 = IMG_FALSE;
+
+ /* Current motherboard BIOSes will normally set up a WRBACK MTRR spanning
+ * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
+ * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
+ *
+ * WRBACK is incompatible with some PCI devices, so try to split
+ * the UNCACHABLE regions up and insert a WRCOMB region instead.
+ */
+ err = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
+ if (err < 0)
+ {
+ /* If this fails, services has probably run before and created
+ * a write-combined MTRR for the test chip. Assume it has, and
+ * don't return an error here.
+ */
+ return PVRSRV_OK;
+ }
+
+ if (err == 0)
+ bGotMTRR0 = IMG_TRUE;
+
+ err = mtrr_del(err, start, end - start);
+ if (err < 0)
+ {
+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ if (bGotMTRR0)
+ {
+ /* Replace MTRR 0 with a non-overlapping WRBACK MTRR */
+ err = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
+ if (err < 0)
+ {
+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ /* Add a WRCOMB MTRR for the PCI device memory bar */
+ err = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
+ if (err < 0)
+ {
+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+#endif /* defined(CONFIG_MTRR) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PCI_SUPPORT_H__
+#define __PCI_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#if defined(LINUX)
+#include <linux/pci.h>
+#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev))
+#else
+#define TO_PCI_COOKIE(dev) (dev)
+#endif
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
+ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
+ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+
+#if defined(CONFIG_MTRR)
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+#else
+static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+ PVR_UNREFERENCED_PARAMETER(ui32Index);
+ return PVRSRV_OK;
+}
+#endif
+
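+/* Usage sketch (illustrative only): a typical caller acquires the device,
+ * claims a BAR, queries its address, and releases everything on teardown.
+ * The vendor/device IDs below are the Apollo PDP ones defined in pdp_regs.h
+ * and are used here purely as an example; BAR index 0 is likewise assumed.
+ */
+#if 0
+static PVRSRV_ERROR ExamplePCISetup(void)
+{
+ PVRSRV_PCI_DEV_HANDLE hPVRPCI;
+ IMG_UINT64 ui64RegBase;
+
+ hPVRPCI = OSPCIAcquireDev(0x1010, 0x1CF1, HOST_PCI_INIT_FLAG_BUS_MASTER);
+ if (hPVRPCI == NULL)
+ {
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ /* Claim BAR 0 before using it; OSPCIAddrRangeLen() reports its size */
+ if (OSPCIRequestAddrRange(hPVRPCI, 0) != PVRSRV_OK)
+ {
+ (void) OSPCIReleaseDev(hPVRPCI);
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ ui64RegBase = OSPCIAddrRangeStart(hPVRPCI, 0);
+ /* ... map the registers at ui64RegBase and use the device ... */
+
+ (void) OSPCIReleaseAddrRange(hPVRPCI, 0);
+ return OSPCIReleaseDev(hPVRPCI);
+}
+#endif
+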
+#endif /* __PCI_SUPPORT_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_REGS_H__)
+#define __PDP_REGS_H__
+
+/*************************************************************************/ /*!
+ PCI Device Information
+*/ /**************************************************************************/
+
+#define DCPDP_VENDOR_ID_POWERVR (0x1010)
+
+#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA (0x1CF1)
+#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA (0x1CF2)
+
+/*************************************************************************/ /*!
+ PCI Device Base Address Information
+*/ /**************************************************************************/
+
+/* PLL and PDP registers on base address register 0 */
+#define DCPDP_REG_PCI_BASENUM (0)
+
+#define DCPDP_PCI_PLL_REG_OFFSET (0x1000)
+#define DCPDP_PCI_PLL_REG_SIZE (0x0400)
+
+#define DCPDP_PCI_PDP_REG_OFFSET (0xC000)
+#define DCPDP_PCI_PDP_REG_SIZE (0x2000)
+
+/*************************************************************************/ /*!
+ Misc register information
+*/ /**************************************************************************/
+
+/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */
+#define DCPDP_STR1SURF_FORMAT_ARGB8888 (0xE)
+#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT (4)
+#define DCPDP_STR1POSN_STRIDE_SHIFT (4)
+
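+/* Illustrative sketch only: it shows how the BAR/offset/size values above are
+ * normally combined. OSPCIAddrRangeStart() is the helper declared in
+ * pci_support.h, and the mapping step is only hinted at, not implemented.
+ */
+#if 0
+static void ExampleLocatePdpRegs(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ /* BAR 0 carries both the PLL and the PDP register blocks */
+ IMG_UINT64 ui64Bar0 = OSPCIAddrRangeStart(hPVRPCI, DCPDP_REG_PCI_BASENUM);
+ IMG_UINT64 ui64PllRegs = ui64Bar0 + DCPDP_PCI_PLL_REG_OFFSET;
+ IMG_UINT64 ui64PdpRegs = ui64Bar0 + DCPDP_PCI_PDP_REG_OFFSET;
+
+ /* ... map DCPDP_PCI_PLL_REG_SIZE bytes at ui64PllRegs and
+ * DCPDP_PCI_PDP_REG_SIZE bytes at ui64PdpRegs before register access ... */
+}
+#endif
+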
+#endif /* !defined(__PDP_REGS_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PDP_TC5_FBDC_REGS_H_
+#define _PDP_TC5_FBDC_REGS_H_
+
+#define PVR5__PDP_FBDC_INTRFC_NUM_TILES (0x00)
+#define PVR5__PDP_FBDC_INTRFC_PIXEL_FORMAT (0x04)
+#define PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS (0x08)
+#define PVR5__PDP_FBDC_INTRFC_PER_LINE (0x0C)
+#define PVR5__PDP_FBDC_INTRFC_INVALIDATE_REQUEST (0x10)
+
+#endif /* _PDP_TC5_FBDC_REGS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OUT_DRV_H_
+#define _OUT_DRV_H_
+
+/*
+ Register PVR_PDP_GRPH1SURF
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1SURF 0x0000
+#define PVR5__GRPH1USEHQCD_MASK 0x00400000U
+#define PVR5__GRPH1USEHQCD_SHIFT 22
+#define PVR5__GRPH1USEHQCD_SIGNED 0
+
+#define PVR5__GRPH1USELUT_MASK 0x00800000U
+#define PVR5__GRPH1USELUT_SHIFT 23
+#define PVR5__GRPH1USELUT_SIGNED 0
+
+#define PVR5__GRPH1LUTRWCHOICE_MASK 0x01000000U
+#define PVR5__GRPH1LUTRWCHOICE_SHIFT 24
+#define PVR5__GRPH1LUTRWCHOICE_SIGNED 0
+
+#define PVR5__GRPH1USECSC_MASK 0x02000000U
+#define PVR5__GRPH1USECSC_SHIFT 25
+#define PVR5__GRPH1USECSC_SIGNED 0
+
+#define PVR5__GRPH1USEGAMMA_MASK 0x04000000U
+#define PVR5__GRPH1USEGAMMA_SHIFT 26
+#define PVR5__GRPH1USEGAMMA_SIGNED 0
+
+#define PVR5__GRPH1PIXFMT_MASK 0xF8000000U
+#define PVR5__GRPH1PIXFMT_SHIFT 27
+#define PVR5__GRPH1PIXFMT_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1BLND
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1BLND 0x0020
+#define PVR5__GRPH1CKEY_MASK 0x00FFFFFFU
+#define PVR5__GRPH1CKEY_SHIFT 0
+#define PVR5__GRPH1CKEY_SIGNED 0
+
+#define PVR5__GRPH1GALPHA_MASK 0xFF000000U
+#define PVR5__GRPH1GALPHA_SHIFT 24
+#define PVR5__GRPH1GALPHA_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1BLND2
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1BLND2 0x0040
+#define PVR5__GRPH1CKEYMASK_MASK 0x00FFFFFFU
+#define PVR5__GRPH1CKEYMASK_SHIFT 0
+#define PVR5__GRPH1CKEYMASK_SIGNED 0
+
+#define PVR5__GRPH1LINDBL_MASK 0x20000000U
+#define PVR5__GRPH1LINDBL_SHIFT 29
+#define PVR5__GRPH1LINDBL_SIGNED 0
+
+#define PVR5__GRPH1PIXDBL_MASK 0x80000000U
+#define PVR5__GRPH1PIXDBL_SHIFT 31
+#define PVR5__GRPH1PIXDBL_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1CTRL
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1CTRL 0x0060
+#define PVR5__GRPH1BLENDPOS_MASK 0x07000000U
+#define PVR5__GRPH1BLENDPOS_SHIFT 24
+#define PVR5__GRPH1BLENDPOS_SIGNED 0
+
+#define PVR5__GRPH1BLEND_MASK 0x18000000U
+#define PVR5__GRPH1BLEND_SHIFT 27
+#define PVR5__GRPH1BLEND_SIGNED 0
+
+#define PVR5__GRPH1CKEYSRC_MASK 0x20000000U
+#define PVR5__GRPH1CKEYSRC_SHIFT 29
+#define PVR5__GRPH1CKEYSRC_SIGNED 0
+
+#define PVR5__GRPH1CKEYEN_MASK 0x40000000U
+#define PVR5__GRPH1CKEYEN_SHIFT 30
+#define PVR5__GRPH1CKEYEN_SIGNED 0
+
+#define PVR5__GRPH1STREN_MASK 0x80000000U
+#define PVR5__GRPH1STREN_SHIFT 31
+#define PVR5__GRPH1STREN_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1STRIDE
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1STRIDE 0x0080
+#define PVR5__GRPH1STRIDE_MASK 0xFFC00000U
+#define PVR5__GRPH1STRIDE_SHIFT 22
+#define PVR5__GRPH1STRIDE_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1SIZE
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1SIZE 0x00A0
+#define PVR5__GRPH1HEIGHT_MASK 0x00000FFFU
+#define PVR5__GRPH1HEIGHT_SHIFT 0
+#define PVR5__GRPH1HEIGHT_SIGNED 0
+
+#define PVR5__GRPH1WIDTH_MASK 0x0FFF0000U
+#define PVR5__GRPH1WIDTH_SHIFT 16
+#define PVR5__GRPH1WIDTH_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1POSN
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1POSN 0x00C0
+#define PVR5__GRPH1YSTART_MASK 0x00000FFFU
+#define PVR5__GRPH1YSTART_SHIFT 0
+#define PVR5__GRPH1YSTART_SIGNED 0
+
+#define PVR5__GRPH1XSTART_MASK 0x0FFF0000U
+#define PVR5__GRPH1XSTART_SHIFT 16
+#define PVR5__GRPH1XSTART_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1_INTERLEAVE_CTRL
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1_INTERLEAVE_CTRL 0x00F0
+#define PVR5__GRPH1INTFIELD_MASK 0x00000001U
+#define PVR5__GRPH1INTFIELD_SHIFT 0
+#define PVR5__GRPH1INTFIELD_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1_BASEADDR
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1_BASEADDR 0x0110
+#define PVR5__GRPH1BASEADDR_MASK 0xFFFFFFF0U
+#define PVR5__GRPH1BASEADDR_SHIFT 4
+#define PVR5__GRPH1BASEADDR_SIGNED 0
+
+/*
+ Register PVR_PDP_SYNCCTRL
+*/
+#define PVR5__PDP_PVR_PDP_SYNCCTRL 0x0154
+#define PVR5__HSDIS_MASK 0x00000001U
+#define PVR5__HSDIS_SHIFT 0
+#define PVR5__HSDIS_SIGNED 0
+
+#define PVR5__HSPOL_MASK 0x00000002U
+#define PVR5__HSPOL_SHIFT 1
+#define PVR5__HSPOL_SIGNED 0
+
+#define PVR5__VSDIS_MASK 0x00000004U
+#define PVR5__VSDIS_SHIFT 2
+#define PVR5__VSDIS_SIGNED 0
+
+#define PVR5__VSPOL_MASK 0x00000008U
+#define PVR5__VSPOL_SHIFT 3
+#define PVR5__VSPOL_SIGNED 0
+
+#define PVR5__BLNKDIS_MASK 0x00000010U
+#define PVR5__BLNKDIS_SHIFT 4
+#define PVR5__BLNKDIS_SIGNED 0
+
+#define PVR5__BLNKPOL_MASK 0x00000020U
+#define PVR5__BLNKPOL_SHIFT 5
+#define PVR5__BLNKPOL_SIGNED 0
+
+#define PVR5__HS_SLAVE_MASK 0x00000040U
+#define PVR5__HS_SLAVE_SHIFT 6
+#define PVR5__HS_SLAVE_SIGNED 0
+
+#define PVR5__VS_SLAVE_MASK 0x00000080U
+#define PVR5__VS_SLAVE_SHIFT 7
+#define PVR5__VS_SLAVE_SIGNED 0
+
+#define PVR5__CLKPOL_MASK 0x00000800U
+#define PVR5__CLKPOL_SHIFT 11
+#define PVR5__CLKPOL_SIGNED 0
+
+#define PVR5__CSYNC_EN_MASK 0x00001000U
+#define PVR5__CSYNC_EN_SHIFT 12
+#define PVR5__CSYNC_EN_SIGNED 0
+
+#define PVR5__FIELD_EN_MASK 0x00002000U
+#define PVR5__FIELD_EN_SHIFT 13
+#define PVR5__FIELD_EN_SIGNED 0
+
+#define PVR5__FIELDPOL_MASK 0x00004000U
+#define PVR5__FIELDPOL_SHIFT 14
+#define PVR5__FIELDPOL_SIGNED 0
+
+#define PVR5__UPDWAIT_MASK 0x000F0000U
+#define PVR5__UPDWAIT_SHIFT 16
+#define PVR5__UPDWAIT_SIGNED 0
+
+#define PVR5__UPDCTRL_MASK 0x01000000U
+#define PVR5__UPDCTRL_SHIFT 24
+#define PVR5__UPDCTRL_SIGNED 0
+
+#define PVR5__UPDINTCTRL_MASK 0x02000000U
+#define PVR5__UPDINTCTRL_SHIFT 25
+#define PVR5__UPDINTCTRL_SIGNED 0
+
+#define PVR5__UPDSYNCTRL_MASK 0x04000000U
+#define PVR5__UPDSYNCTRL_SHIFT 26
+#define PVR5__UPDSYNCTRL_SIGNED 0
+
+#define PVR5__LOWPWRMODE_MASK 0x08000000U
+#define PVR5__LOWPWRMODE_SHIFT 27
+#define PVR5__LOWPWRMODE_SIGNED 0
+
+#define PVR5__POWERDN_MASK 0x10000000U
+#define PVR5__POWERDN_SHIFT 28
+#define PVR5__POWERDN_SIGNED 0
+
+#define PVR5__PDP_RST_MASK 0x20000000U
+#define PVR5__PDP_RST_SHIFT 29
+#define PVR5__PDP_RST_SIGNED 0
+
+#define PVR5__SYNCACTIVE_MASK 0x80000000U
+#define PVR5__SYNCACTIVE_SHIFT 31
+#define PVR5__SYNCACTIVE_SIGNED 0
+
+/*
+ Register PVR_PDP_HSYNC1
+*/
+#define PVR5__PDP_PVR_PDP_HSYNC1 0x0158
+#define PVR5__HT_MASK 0x00001FFFU
+#define PVR5__HT_SHIFT 0
+#define PVR5__HT_SIGNED 0
+
+#define PVR5__HBPS_MASK 0x1FFF0000U
+#define PVR5__HBPS_SHIFT 16
+#define PVR5__HBPS_SIGNED 0
+
+/*
+ Register PVR_PDP_HSYNC2
+*/
+#define PVR5__PDP_PVR_PDP_HSYNC2 0x015C
+#define PVR5__HLBS_MASK 0x00001FFFU
+#define PVR5__HLBS_SHIFT 0
+#define PVR5__HLBS_SIGNED 0
+
+#define PVR5__HAS_MASK 0x1FFF0000U
+#define PVR5__HAS_SHIFT 16
+#define PVR5__HAS_SIGNED 0
+
+/*
+ Register PVR_PDP_HSYNC3
+*/
+#define PVR5__PDP_PVR_PDP_HSYNC3 0x0160
+#define PVR5__HRBS_MASK 0x00001FFFU
+#define PVR5__HRBS_SHIFT 0
+#define PVR5__HRBS_SIGNED 0
+
+#define PVR5__HFPS_MASK 0x1FFF0000U
+#define PVR5__HFPS_SHIFT 16
+#define PVR5__HFPS_SIGNED 0
+
+/*
+ Register PVR_PDP_VSYNC1
+*/
+#define PVR5__PDP_PVR_PDP_VSYNC1 0x0164
+#define PVR5__VT_MASK 0x00001FFFU
+#define PVR5__VT_SHIFT 0
+#define PVR5__VT_SIGNED 0
+
+#define PVR5__VBPS_MASK 0x1FFF0000U
+#define PVR5__VBPS_SHIFT 16
+#define PVR5__VBPS_SIGNED 0
+
+/*
+ Register PVR_PDP_VSYNC2
+*/
+#define PVR5__PDP_PVR_PDP_VSYNC2 0x0168
+#define PVR5__VTBS_MASK 0x00001FFFU
+#define PVR5__VTBS_SHIFT 0
+#define PVR5__VTBS_SIGNED 0
+
+#define PVR5__VAS_MASK 0x1FFF0000U
+#define PVR5__VAS_SHIFT 16
+#define PVR5__VAS_SIGNED 0
+
+/*
+ Register PVR_PDP_VSYNC3
+*/
+#define PVR5__PDP_PVR_PDP_VSYNC3 0x016C
+#define PVR5__VBBS_MASK 0x00001FFFU
+#define PVR5__VBBS_SHIFT 0
+#define PVR5__VBBS_SIGNED 0
+
+#define PVR5__VFPS_MASK 0x1FFF0000U
+#define PVR5__VFPS_SHIFT 16
+#define PVR5__VFPS_SIGNED 0
+
+/*
+ Register PVR_PDP_BORDCOL
+*/
+#define PVR5__PDP_PVR_PDP_BORDCOL 0x0170
+#define PVR5__BORDCOL_MASK 0x00FFFFFFU
+#define PVR5__BORDCOL_SHIFT 0
+#define PVR5__BORDCOL_SIGNED 0
+
+/*
+ Register PVR_PDP_BGNDCOL
+*/
+#define PVR5__PDP_PVR_PDP_BGNDCOL 0x0174
+#define PVR5__BGNDCOL_MASK 0x00FFFFFFU
+#define PVR5__BGNDCOL_SHIFT 0
+#define PVR5__BGNDCOL_SIGNED 0
+
+#define PVR5__BGNDALPHA_MASK 0xFF000000U
+#define PVR5__BGNDALPHA_SHIFT 24
+#define PVR5__BGNDALPHA_SIGNED 0
+
+/*
+ Register PVR_PDP_INTSTAT
+*/
+#define PVR5__PDP_PVR_PDP_INTSTAT 0x0178
+#define PVR5__INTS_HBLNK0_MASK 0x00000001U
+#define PVR5__INTS_HBLNK0_SHIFT 0
+#define PVR5__INTS_HBLNK0_SIGNED 0
+
+#define PVR5__INTS_HBLNK1_MASK 0x00000002U
+#define PVR5__INTS_HBLNK1_SHIFT 1
+#define PVR5__INTS_HBLNK1_SIGNED 0
+
+#define PVR5__INTS_VBLNK0_MASK 0x00000004U
+#define PVR5__INTS_VBLNK0_SHIFT 2
+#define PVR5__INTS_VBLNK0_SIGNED 0
+
+#define PVR5__INTS_VBLNK1_MASK 0x00000008U
+#define PVR5__INTS_VBLNK1_SHIFT 3
+#define PVR5__INTS_VBLNK1_SIGNED 0
+
+#define PVR5__INTS_GRPH1URUN_MASK 0x00000010U
+#define PVR5__INTS_GRPH1URUN_SHIFT 4
+#define PVR5__INTS_GRPH1URUN_SIGNED 0
+
+#define PVR5__INTS_GRPH1ORUN_MASK 0x00010000U
+#define PVR5__INTS_GRPH1ORUN_SHIFT 16
+#define PVR5__INTS_GRPH1ORUN_SIGNED 0
+
+#define PVR5__INTS_I2P_PDP_EOL_MISMATCH_MASK 0x01000000U
+#define PVR5__INTS_I2P_PDP_EOL_MISMATCH_SHIFT 24
+#define PVR5__INTS_I2P_PDP_EOL_MISMATCH_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_OVERFLOW_MASK 0x02000000U
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_OVERFLOW_SHIFT 25
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_UNDERFLOW_MASK 0x04000000U
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_UNDERFLOW_SHIFT 26
+#define PVR5__INTS_I2P_OUT_PIXEL_FIFO_UNDERFLOW_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_MASK 0x08000000U
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_SHIFT 27
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_MASK 0x10000000U
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_SHIFT 28
+#define PVR5__INTS_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_SB_FIFO_OVERFLOW_MASK 0x20000000U
+#define PVR5__INTS_I2P_OUT_SB_FIFO_OVERFLOW_SHIFT 29
+#define PVR5__INTS_I2P_OUT_SB_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTS_I2P_OUT_SB_FIFO_UNDERFLOW_MASK 0x40000000U
+#define PVR5__INTS_I2P_OUT_SB_FIFO_UNDERFLOW_SHIFT 30
+#define PVR5__INTS_I2P_OUT_SB_FIFO_UNDERFLOW_SIGNED 0
+
+/*
+ Register PVR_PDP_INTENAB
+*/
+#define PVR5__PDP_PVR_PDP_INTENAB 0x017C
+#define PVR5__INTEN_HBLNK0_MASK 0x00000001U
+#define PVR5__INTEN_HBLNK0_SHIFT 0
+#define PVR5__INTEN_HBLNK0_SIGNED 0
+
+#define PVR5__INTEN_HBLNK1_MASK 0x00000002U
+#define PVR5__INTEN_HBLNK1_SHIFT 1
+#define PVR5__INTEN_HBLNK1_SIGNED 0
+
+#define PVR5__INTEN_VBLNK0_MASK 0x00000004U
+#define PVR5__INTEN_VBLNK0_SHIFT 2
+#define PVR5__INTEN_VBLNK0_SIGNED 0
+
+#define PVR5__INTEN_VBLNK1_MASK 0x00000008U
+#define PVR5__INTEN_VBLNK1_SHIFT 3
+#define PVR5__INTEN_VBLNK1_SIGNED 0
+
+#define PVR5__INTEN_GRPH1URUN_MASK 0x00000010U
+#define PVR5__INTEN_GRPH1URUN_SHIFT 4
+#define PVR5__INTEN_GRPH1URUN_SIGNED 0
+
+#define PVR5__INTEN_GRPH1ORUN_MASK 0x00010000U
+#define PVR5__INTEN_GRPH1ORUN_SHIFT 16
+#define PVR5__INTEN_GRPH1ORUN_SIGNED 0
+
+#define PVR5__INTEN_I2P_PDP_EOL_MISMATCH_MASK 0x01000000U
+#define PVR5__INTEN_I2P_PDP_EOL_MISMATCH_SHIFT 24
+#define PVR5__INTEN_I2P_PDP_EOL_MISMATCH_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_OVERFLOW_MASK 0x02000000U
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_OVERFLOW_SHIFT 25
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_UNDERFLOW_MASK 0x04000000U
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_UNDERFLOW_SHIFT 26
+#define PVR5__INTEN_I2P_OUT_PIXEL_FIFO_UNDERFLOW_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_MASK 0x08000000U
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_SHIFT 27
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_MASK 0x10000000U
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_SHIFT 28
+#define PVR5__INTEN_I2P_OUT_EXT_RAM_FIFO_UNDERFLOW_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_OVERFLOW_MASK 0x20000000U
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_OVERFLOW_SHIFT 29
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_OVERFLOW_SIGNED 0
+
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_UNDERFLOW_MASK 0x40000000U
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_UNDERFLOW_SHIFT 30
+#define PVR5__INTEN_I2P_OUT_SB_FIFO_UNDERFLOW_SIGNED 0
+
+/*
+ Register PVR_PDP_INTCTRL
+*/
+#define PVR5__PDP_PVR_PDP_INTCTRL 0x0180
+#define PVR5__HBLNK_LINENO_MASK 0x00001FFFU
+#define PVR5__HBLNK_LINENO_SHIFT 0
+#define PVR5__HBLNK_LINENO_SIGNED 0
+
+#define PVR5__HBLNK_LINE_MASK 0x00010000U
+#define PVR5__HBLNK_LINE_SHIFT 16
+#define PVR5__HBLNK_LINE_SIGNED 0
+
+/*
+ Register PVR_PDP_SIGNAT
+*/
+#define PVR5__PDP_PVR_PDP_SIGNAT 0x0184
+#define PVR5__SIGNATURE_MASK 0xFFFFFFFFU
+#define PVR5__SIGNATURE_SHIFT 0
+#define PVR5__SIGNATURE_SIGNED 0
+
+/*
+ Register PVR_PDP_MEMCTRL
+*/
+#define PVR5__PDP_PVR_PDP_MEMCTRL 0x0188
+#define PVR5__BURSTLEN_MASK 0x0000001FU
+#define PVR5__BURSTLEN_SHIFT 0
+#define PVR5__BURSTLEN_SIGNED 0
+
+#define PVR5__THRESHOLD_MASK 0x00001F80U
+#define PVR5__THRESHOLD_SHIFT 7
+#define PVR5__THRESHOLD_SIGNED 0
+
+#define PVR5__YTHRESHOLD_MASK 0x001F8000U
+#define PVR5__YTHRESHOLD_SHIFT 15
+#define PVR5__YTHRESHOLD_SIGNED 0
+
+#define PVR5__UVTHRESHOLD_MASK 0x0F800000U
+#define PVR5__UVTHRESHOLD_SHIFT 23
+#define PVR5__UVTHRESHOLD_SIGNED 0
+
+#define PVR5__MEMREFRESH_MASK 0xC0000000U
+#define PVR5__MEMREFRESH_SHIFT 30
+#define PVR5__MEMREFRESH_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1_MEMCTRL
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1_MEMCTRL 0x0190
+#define PVR5__GRPH1_BURSTLEN_MASK 0x0000001FU
+#define PVR5__GRPH1_BURSTLEN_SHIFT 0
+#define PVR5__GRPH1_BURSTLEN_SIGNED 0
+
+#define PVR5__GRPH1_THRESHOLD_MASK 0x00001F80U
+#define PVR5__GRPH1_THRESHOLD_SHIFT 7
+#define PVR5__GRPH1_THRESHOLD_SIGNED 0
+
+#define PVR5__GRPH1_YTHRESHOLD_MASK 0x001F8000U
+#define PVR5__GRPH1_YTHRESHOLD_SHIFT 15
+#define PVR5__GRPH1_YTHRESHOLD_SIGNED 0
+
+#define PVR5__GRPH1_UVTHRESHOLD_MASK 0x0F800000U
+#define PVR5__GRPH1_UVTHRESHOLD_SHIFT 23
+#define PVR5__GRPH1_UVTHRESHOLD_SIGNED 0
+
+#define PVR5__GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK 0x80000000U
+#define PVR5__GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT 31
+#define PVR5__GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED 0
+
+/*
+ Register PVR_PDP_PORTER_BLND1
+*/
+#define PVR5__PDP_PVR_PDP_PORTER_BLND1 0x01E4
+#define PVR5__BLND1PORTERMODE_MASK 0x0000000FU
+#define PVR5__BLND1PORTERMODE_SHIFT 0
+#define PVR5__BLND1PORTERMODE_SIGNED 0
+
+#define PVR5__BLND1BLENDTYPE_MASK 0x00000010U
+#define PVR5__BLND1BLENDTYPE_SHIFT 4
+#define PVR5__BLND1BLENDTYPE_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA0
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA0 0x0200
+#define PVR5__GAMMA0_MASK 0x00FFFFFFU
+#define PVR5__GAMMA0_SHIFT 0
+#define PVR5__GAMMA0_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA1
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA1 0x0204
+#define PVR5__GAMMA1_MASK 0x00FFFFFFU
+#define PVR5__GAMMA1_SHIFT 0
+#define PVR5__GAMMA1_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA2
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA2 0x0208
+#define PVR5__GAMMA2_MASK 0x00FFFFFFU
+#define PVR5__GAMMA2_SHIFT 0
+#define PVR5__GAMMA2_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA3
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA3 0x020C
+#define PVR5__GAMMA3_MASK 0x00FFFFFFU
+#define PVR5__GAMMA3_SHIFT 0
+#define PVR5__GAMMA3_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA4
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA4 0x0210
+#define PVR5__GAMMA4_MASK 0x00FFFFFFU
+#define PVR5__GAMMA4_SHIFT 0
+#define PVR5__GAMMA4_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA5
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA5 0x0214
+#define PVR5__GAMMA5_MASK 0x00FFFFFFU
+#define PVR5__GAMMA5_SHIFT 0
+#define PVR5__GAMMA5_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA6
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA6 0x0218
+#define PVR5__GAMMA6_MASK 0x00FFFFFFU
+#define PVR5__GAMMA6_SHIFT 0
+#define PVR5__GAMMA6_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA7
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA7 0x021C
+#define PVR5__GAMMA7_MASK 0x00FFFFFFU
+#define PVR5__GAMMA7_SHIFT 0
+#define PVR5__GAMMA7_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA8
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA8 0x0220
+#define PVR5__GAMMA8_MASK 0x00FFFFFFU
+#define PVR5__GAMMA8_SHIFT 0
+#define PVR5__GAMMA8_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA9
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA9 0x0224
+#define PVR5__GAMMA9_MASK 0x00FFFFFFU
+#define PVR5__GAMMA9_SHIFT 0
+#define PVR5__GAMMA9_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA10
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA10 0x0228
+#define PVR5__GAMMA10_MASK 0x00FFFFFFU
+#define PVR5__GAMMA10_SHIFT 0
+#define PVR5__GAMMA10_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA11
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA11 0x022C
+#define PVR5__GAMMA11_MASK 0x00FFFFFFU
+#define PVR5__GAMMA11_SHIFT 0
+#define PVR5__GAMMA11_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA12
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA12 0x0230
+#define PVR5__GAMMA12_MASK 0x00FFFFFFU
+#define PVR5__GAMMA12_SHIFT 0
+#define PVR5__GAMMA12_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA13
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA13 0x0234
+#define PVR5__GAMMA13_MASK 0x00FFFFFFU
+#define PVR5__GAMMA13_SHIFT 0
+#define PVR5__GAMMA13_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA14
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA14 0x0238
+#define PVR5__GAMMA14_MASK 0x00FFFFFFU
+#define PVR5__GAMMA14_SHIFT 0
+#define PVR5__GAMMA14_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA15
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA15 0x023C
+#define PVR5__GAMMA15_MASK 0x00FFFFFFU
+#define PVR5__GAMMA15_SHIFT 0
+#define PVR5__GAMMA15_SIGNED 0
+
+/*
+ Register PVR_PDP_GAMMA16
+*/
+#define PVR5__PDP_PVR_PDP_GAMMA16 0x0240
+#define PVR5__GAMMA16_MASK 0x00FFFFFFU
+#define PVR5__GAMMA16_SHIFT 0
+#define PVR5__GAMMA16_SIGNED 0
+
+/*
+ Register PVR_PDP_REGLD_ADDR_CTRL
+*/
+#define PVR5__PDP_PVR_PDP_REGLD_ADDR_CTRL 0x0298
+#define PVR5__REGLD_ADDRIN_MASK 0xFFFFFFF0U
+#define PVR5__REGLD_ADDRIN_SHIFT 4
+#define PVR5__REGLD_ADDRIN_SIGNED 0
+
+/*
+ Register PVR_PDP_REGLD_ADDR_STAT
+*/
+#define PVR5__PDP_PVR_PDP_REGLD_ADDR_STAT 0x029C
+#define PVR5__REGLD_ADDROUT_MASK 0xFFFFFFF0U
+#define PVR5__REGLD_ADDROUT_SHIFT 4
+#define PVR5__REGLD_ADDROUT_SIGNED 0
+
+/*
+ Register PVR_PDP_REGLD_STAT
+*/
+#define PVR5__PDP_PVR_PDP_REGLD_STAT 0x0300
+#define PVR5__REGLD_ADDREN_MASK 0x00800000U
+#define PVR5__REGLD_ADDREN_SHIFT 23
+#define PVR5__REGLD_ADDREN_SIGNED 0
+
+/*
+ Register PVR_PDP_REGLD_CTRL
+*/
+#define PVR5__PDP_PVR_PDP_REGLD_CTRL 0x0304
+#define PVR5__REGLD_VAL_MASK 0x00800000U
+#define PVR5__REGLD_VAL_SHIFT 23
+#define PVR5__REGLD_VAL_SIGNED 0
+
+#define PVR5__REGLD_ADDRLEN_MASK 0xFF000000U
+#define PVR5__REGLD_ADDRLEN_SHIFT 24
+#define PVR5__REGLD_ADDRLEN_SIGNED 0
+
+/*
+ Register PVR_PDP_LINESTAT
+*/
+#define PVR5__PDP_PVR_PDP_LINESTAT 0x0308
+#define PVR5__LINENO_MASK 0x00001FFFU
+#define PVR5__LINENO_SHIFT 0
+#define PVR5__LINENO_SIGNED 0
+
+/*
+ Register PVR_PDP_UPDCTRL
+*/
+#define PVR5__PDP_PVR_PDP_UPDCTRL 0x030C
+#define PVR5__UPDFIELD_MASK 0x00000001U
+#define PVR5__UPDFIELD_SHIFT 0
+#define PVR5__UPDFIELD_SIGNED 0
+
+/*
+ Register PVR_PDP_VEVENT
+*/
+#define PVR5__PDP_PVR_PDP_VEVENT 0x0310
+#define PVR5__VFETCH_MASK 0x00001FFFU
+#define PVR5__VFETCH_SHIFT 0
+#define PVR5__VFETCH_SIGNED 0
+
+#define PVR5__VEVENT_MASK 0x1FFF0000U
+#define PVR5__VEVENT_SHIFT 16
+#define PVR5__VEVENT_SIGNED 0
+
+/*
+ Register PVR_PDP_HDECTRL
+*/
+#define PVR5__PDP_PVR_PDP_HDECTRL 0x0314
+#define PVR5__HDEF_MASK 0x00001FFFU
+#define PVR5__HDEF_SHIFT 0
+#define PVR5__HDEF_SIGNED 0
+
+#define PVR5__HDES_MASK 0x1FFF0000U
+#define PVR5__HDES_SHIFT 16
+#define PVR5__HDES_SIGNED 0
+
+/*
+ Register PVR_PDP_VDECTRL
+*/
+#define PVR5__PDP_PVR_PDP_VDECTRL 0x0318
+#define PVR5__VDEF_MASK 0x00001FFFU
+#define PVR5__VDEF_SHIFT 0
+#define PVR5__VDEF_SIGNED 0
+
+#define PVR5__VDES_MASK 0x1FFF0000U
+#define PVR5__VDES_SHIFT 16
+#define PVR5__VDES_SIGNED 0
+
+/*
+ Register PVR_PDP_OPMASK
+*/
+#define PVR5__PDP_PVR_PDP_OPMASK 0x031C
+#define PVR5__MASKR_MASK 0x000000FFU
+#define PVR5__MASKR_SHIFT 0
+#define PVR5__MASKR_SIGNED 0
+
+#define PVR5__MASKG_MASK 0x0000FF00U
+#define PVR5__MASKG_SHIFT 8
+#define PVR5__MASKG_SIGNED 0
+
+#define PVR5__MASKB_MASK 0x00FF0000U
+#define PVR5__MASKB_SHIFT 16
+#define PVR5__MASKB_SIGNED 0
+
+#define PVR5__BLANKLEVEL_MASK 0x40000000U
+#define PVR5__BLANKLEVEL_SHIFT 30
+#define PVR5__BLANKLEVEL_SIGNED 0
+
+#define PVR5__MASKLEVEL_MASK 0x80000000U
+#define PVR5__MASKLEVEL_SHIFT 31
+#define PVR5__MASKLEVEL_SIGNED 0
+
+/*
+ Register PVR_PDP_CSCCOEFF0
+*/
+#define PVR5__PDP_PVR_PDP_CSCCOEFF0 0x0330
+#define PVR5__CSCCOEFFRY_MASK 0x000007FFU
+#define PVR5__CSCCOEFFRY_SHIFT 0
+#define PVR5__CSCCOEFFRY_SIGNED 0
+
+#define PVR5__CSCCOEFFRU_MASK 0x003FF800U
+#define PVR5__CSCCOEFFRU_SHIFT 11
+#define PVR5__CSCCOEFFRU_SIGNED 0
+
+/*
+ Register PVR_PDP_CSCCOEFF1
+*/
+#define PVR5__PDP_PVR_PDP_CSCCOEFF1 0x0334
+#define PVR5__CSCCOEFFRV_MASK 0x000007FFU
+#define PVR5__CSCCOEFFRV_SHIFT 0
+#define PVR5__CSCCOEFFRV_SIGNED 0
+
+#define PVR5__CSCCOEFFGY_MASK 0x003FF800U
+#define PVR5__CSCCOEFFGY_SHIFT 11
+#define PVR5__CSCCOEFFGY_SIGNED 0
+
+/*
+ Register PVR_PDP_CSCCOEFF2
+*/
+#define PVR5__PDP_PVR_PDP_CSCCOEFF2 0x0338
+#define PVR5__CSCCOEFFGU_MASK 0x000007FFU
+#define PVR5__CSCCOEFFGU_SHIFT 0
+#define PVR5__CSCCOEFFGU_SIGNED 0
+
+#define PVR5__CSCCOEFFGV_MASK 0x003FF800U
+#define PVR5__CSCCOEFFGV_SHIFT 11
+#define PVR5__CSCCOEFFGV_SIGNED 0
+
+/*
+ Register PVR_PDP_CSCCOEFF3
+*/
+#define PVR5__PDP_PVR_PDP_CSCCOEFF3 0x033C
+#define PVR5__CSCCOEFFBY_MASK 0x000007FFU
+#define PVR5__CSCCOEFFBY_SHIFT 0
+#define PVR5__CSCCOEFFBY_SIGNED 0
+
+#define PVR5__CSCCOEFFBU_MASK 0x003FF800U
+#define PVR5__CSCCOEFFBU_SHIFT 11
+#define PVR5__CSCCOEFFBU_SIGNED 0
+
+/*
+ Register PVR_PDP_CSCCOEFF4
+*/
+#define PVR5__PDP_PVR_PDP_CSCCOEFF4 0x0340
+#define PVR5__CSCCOEFFBV_MASK 0x000007FFU
+#define PVR5__CSCCOEFFBV_SHIFT 0
+#define PVR5__CSCCOEFFBV_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_C11C12
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_C11C12 0x03D0
+#define PVR5__CR_PROCAMP_C11_MASK 0x00003FFFU
+#define PVR5__CR_PROCAMP_C11_SHIFT 0
+#define PVR5__CR_PROCAMP_C11_SIGNED 0
+
+#define PVR5__CR_PROCAMP_C12_MASK 0x3FFF0000U
+#define PVR5__CR_PROCAMP_C12_SHIFT 16
+#define PVR5__CR_PROCAMP_C12_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_C13C21
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_C13C21 0x03D4
+#define PVR5__CR_PROCAMP_C13_MASK 0x00003FFFU
+#define PVR5__CR_PROCAMP_C13_SHIFT 0
+#define PVR5__CR_PROCAMP_C13_SIGNED 0
+
+#define PVR5__CR_PROCAMP_C21_MASK 0x3FFF0000U
+#define PVR5__CR_PROCAMP_C21_SHIFT 16
+#define PVR5__CR_PROCAMP_C21_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_C22C23
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_C22C23 0x03D8
+#define PVR5__CR_PROCAMP_C22_MASK 0x00003FFFU
+#define PVR5__CR_PROCAMP_C22_SHIFT 0
+#define PVR5__CR_PROCAMP_C22_SIGNED 0
+
+#define PVR5__CR_PROCAMP_C23_MASK 0x3FFF0000U
+#define PVR5__CR_PROCAMP_C23_SHIFT 16
+#define PVR5__CR_PROCAMP_C23_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_C31C32
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_C31C32 0x03DC
+#define PVR5__CR_PROCAMP_C31_MASK 0x00003FFFU
+#define PVR5__CR_PROCAMP_C31_SHIFT 0
+#define PVR5__CR_PROCAMP_C31_SIGNED 0
+
+#define PVR5__CR_PROCAMP_C32_MASK 0x3FFF0000U
+#define PVR5__CR_PROCAMP_C32_SHIFT 16
+#define PVR5__CR_PROCAMP_C32_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_C33
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_C33 0x03E0
+#define PVR5__CR_PROCAMP_EN_MASK 0x00000001U
+#define PVR5__CR_PROCAMP_EN_SHIFT 0
+#define PVR5__CR_PROCAMP_EN_SIGNED 0
+
+#define PVR5__CR_PROCAMP_RANGE_MASK 0x00000030U
+#define PVR5__CR_PROCAMP_RANGE_SHIFT 4
+#define PVR5__CR_PROCAMP_RANGE_SIGNED 0
+
+#define PVR5__CR_PROCAMP_C33_MASK 0x3FFF0000U
+#define PVR5__CR_PROCAMP_C33_SHIFT 16
+#define PVR5__CR_PROCAMP_C33_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_INOFFSET
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_INOFFSET 0x03E4
+#define PVR5__CR_PROCAMP_INOFF_B_MASK 0x000000FFU
+#define PVR5__CR_PROCAMP_INOFF_B_SHIFT 0
+#define PVR5__CR_PROCAMP_INOFF_B_SIGNED 0
+
+#define PVR5__CR_PROCAMP_INOFF_G_MASK 0x0000FF00U
+#define PVR5__CR_PROCAMP_INOFF_G_SHIFT 8
+#define PVR5__CR_PROCAMP_INOFF_G_SIGNED 0
+
+#define PVR5__CR_PROCAMP_INOFF_R_MASK 0x00FF0000U
+#define PVR5__CR_PROCAMP_INOFF_R_SHIFT 16
+#define PVR5__CR_PROCAMP_INOFF_R_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_OUTOFFSET_BG
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_OUTOFFSET_BG 0x03E8
+#define PVR5__CR_PROCAMP_OUTOFF_B_MASK 0x000003FFU
+#define PVR5__CR_PROCAMP_OUTOFF_B_SHIFT 0
+#define PVR5__CR_PROCAMP_OUTOFF_B_SIGNED 0
+
+#define PVR5__CR_PROCAMP_OUTOFF_G_MASK 0x03FF0000U
+#define PVR5__CR_PROCAMP_OUTOFF_G_SHIFT 16
+#define PVR5__CR_PROCAMP_OUTOFF_G_SIGNED 0
+
+/*
+ Register CR_PDP_PROCAMP_OUTOFFSET_R
+*/
+#define PVR5__PDP_CR_PDP_PROCAMP_OUTOFFSET_R 0x03EC
+#define PVR5__CR_PROCAMP_OUTOFF_R_MASK 0x000003FFU
+#define PVR5__CR_PROCAMP_OUTOFF_R_SHIFT 0
+#define PVR5__CR_PROCAMP_OUTOFF_R_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1_PALETTE_ADDR
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1_PALETTE_ADDR 0x0400
+#define PVR5__GRPH1LUTADDR_MASK 0xFF000000U
+#define PVR5__GRPH1LUTADDR_SHIFT 24
+#define PVR5__GRPH1LUTADDR_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1_PALETTE_DATA
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1_PALETTE_DATA 0x0404
+#define PVR5__GRPH1LUTDATA_MASK 0x00FFFFFFU
+#define PVR5__GRPH1LUTDATA_SHIFT 0
+#define PVR5__GRPH1LUTDATA_SIGNED 0
+
+/*
+ Register PVR_PDP_CORE_ID
+*/
+#define PVR5__PDP_PVR_PDP_CORE_ID 0x04E0
+#define PVR5__CONFIG_ID_MASK 0x0000FFFFU
+#define PVR5__CONFIG_ID_SHIFT 0
+#define PVR5__CONFIG_ID_SIGNED 0
+
+#define PVR5__CORE_ID_MASK 0x00FF0000U
+#define PVR5__CORE_ID_SHIFT 16
+#define PVR5__CORE_ID_SIGNED 0
+
+#define PVR5__GROUP_ID_MASK 0xFF000000U
+#define PVR5__GROUP_ID_SHIFT 24
+#define PVR5__GROUP_ID_SIGNED 0
+
+/*
+ Register PVR_PDP_CORE_REV
+*/
+#define PVR5__PDP_PVR_PDP_CORE_REV 0x04F0
+#define PVR5__MAINT_REV_MASK 0x000000FFU
+#define PVR5__MAINT_REV_SHIFT 0
+#define PVR5__MAINT_REV_SIGNED 0
+
+#define PVR5__MINOR_REV_MASK 0x0000FF00U
+#define PVR5__MINOR_REV_SHIFT 8
+#define PVR5__MINOR_REV_SIGNED 0
+
+#define PVR5__MAJOR_REV_MASK 0x00FF0000U
+#define PVR5__MAJOR_REV_SHIFT 16
+#define PVR5__MAJOR_REV_SIGNED 0
+
+/*
+ Register PVR_PDP_GRPH1SKIPCTRL
+*/
+#define PVR5__PDP_PVR_PDP_GRPH1SKIPCTRL 0x0578
+#define PVR5__GRPH1VSKIP_MASK 0x00000FFFU
+#define PVR5__GRPH1VSKIP_SHIFT 0
+#define PVR5__GRPH1VSKIP_SIGNED 0
+
+#define PVR5__GRPH1HSKIP_MASK 0x0FFF0000U
+#define PVR5__GRPH1HSKIP_SHIFT 16
+#define PVR5__GRPH1HSKIP_SIGNED 0
+
+/*
+ Register PVR_PDP_REGISTER_UPDATE_CTRL
+*/
+#define PVR5__PDP_PVR_PDP_REGISTER_UPDATE_CTRL 0x07A0
+#define PVR5__USE_VBLANK_MASK 0x00000001U
+#define PVR5__USE_VBLANK_SHIFT 0
+#define PVR5__USE_VBLANK_SIGNED 0
+
+#define PVR5__REGISTERS_VALID_MASK 0x00000002U
+#define PVR5__REGISTERS_VALID_SHIFT 1
+#define PVR5__REGISTERS_VALID_SIGNED 0
+
+#define PVR5__BYPASS_DOUBLE_BUFFERING_MASK 0x00000004U
+#define PVR5__BYPASS_DOUBLE_BUFFERING_SHIFT 2
+#define PVR5__BYPASS_DOUBLE_BUFFERING_SIGNED 0
+
+/*
+ Register PVR_PDP_REGISTER_UPDATE_STATUS
+*/
+#define PVR5__PDP_PVR_PDP_REGISTER_UPDATE_STATUS 0x07A4
+#define PVR5__REGISTERS_UPDATED_MASK 0x00000002U
+#define PVR5__REGISTERS_UPDATED_SHIFT 1
+#define PVR5__REGISTERS_UPDATED_SIGNED 0
+
+/*
+ Register PVR_PDP_DBGCTRL
+*/
+#define PVR5__PDP_PVR_PDP_DBGCTRL 0x07B0
+#define PVR5__DBG_ENAB_MASK 0x00000001U
+#define PVR5__DBG_ENAB_SHIFT 0
+#define PVR5__DBG_ENAB_SIGNED 0
+
+#define PVR5__DBG_READ_MASK 0x00000002U
+#define PVR5__DBG_READ_SHIFT 1
+#define PVR5__DBG_READ_SIGNED 0
+
+/*
+ Register PVR_PDP_DBGDATA
+*/
+#define PVR5__PDP_PVR_PDP_DBGDATA 0x07B4
+#define PVR5__DBG_DATA_MASK 0x00FFFFFFU
+#define PVR5__DBG_DATA_SHIFT 0
+#define PVR5__DBG_DATA_SIGNED 0
+
+/*
+ Register PVR_PDP_DBGSIDE
+*/
+#define PVR5__PDP_PVR_PDP_DBGSIDE 0x07B8
+#define PVR5__DBG_SIDE_MASK 0x00000007U
+#define PVR5__DBG_SIDE_SHIFT 0
+#define PVR5__DBG_SIDE_SIGNED 0
+
+#define PVR5__DBG_VAL_MASK 0x00000008U
+#define PVR5__DBG_VAL_SHIFT 3
+#define PVR5__DBG_VAL_SIGNED 0
+
+/*
+ Register PVR_PDP_OUTPUT
+*/
+#define PVR5__PDP_PVR_PDP_OUTPUT 0x07C0
+#define PVR5__OUTPUT_CONFIG_MASK 0x00000001U
+#define PVR5__OUTPUT_CONFIG_SHIFT 0
+#define PVR5__OUTPUT_CONFIG_SIGNED 0
+
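+/* Illustrative sketch only: every field in this file follows the same
+ * _MASK/_SHIFT convention, so packing and unpacking look identical for all of
+ * them. ExampleReadReg32()/ExampleWriteReg32() are hypothetical MMIO helpers,
+ * IMG_UINT32 comes from img_types.h, and no claim is made about the exact
+ * values the hardware expects in each field.
+ */
+#if 0
+static void ExamplePackUnpack(void *pvPdpRegs, IMG_UINT32 ui32Width, IMG_UINT32 ui32Height)
+{
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32IntStat;
+
+ /* Pack two fields into the GRPH1SIZE register layout */
+ ui32Size = ((ui32Height << PVR5__GRPH1HEIGHT_SHIFT) & PVR5__GRPH1HEIGHT_MASK) |
+ ((ui32Width << PVR5__GRPH1WIDTH_SHIFT) & PVR5__GRPH1WIDTH_MASK);
+ ExampleWriteReg32(pvPdpRegs, PVR5__PDP_PVR_PDP_GRPH1SIZE, ui32Size);
+
+ /* Unpack: test whether vertical blank 0 is flagged in INTSTAT */
+ ui32IntStat = ExampleReadReg32(pvPdpRegs, PVR5__PDP_PVR_PDP_INTSTAT);
+ if (ui32IntStat & PVR5__INTS_VBLNK0_MASK)
+ {
+ /* ... handle the vblank interrupt ... */
+ }
+}
+#endif
+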
+#endif /* _OUT_DRV_H_ */
+
+/*****************************************************************************
+ End of file (out_drv.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "sysinfo.h"
+#include "apollo_regs.h"
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#include "ion_sys.h"
+#endif
+
+#include "apollo_drv.h"
+
+#include <linux/platform_device.h>
+
+#if !defined(LMA)
+#error Apollo only supports LMA at the moment
+#endif
+
+/* Valid values for the TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL (1)
+#define TC_MEMORY_HOST (2)
+#define TC_MEMORY_HYBRID (3)
+
+#if TC_MEMORY_CONFIG != TC_MEMORY_LOCAL
+#error Apollo only supports TC_MEMORY_LOCAL at the moment
+#endif
+
+/* These must be consecutive */
+#define PHYS_HEAP_IDX_GENERAL 0
+#define PHYS_HEAP_IDX_DMABUF 1
+#define PHYS_HEAP_IDX_COUNT 2
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+
+/* Dummy DVFS configuration used purely for testing purposes */
+
+static const IMG_OPP asOPPTable[] =
+{
+ { 8, 25000000},
+ { 16, 50000000},
+ { 32, 75000000},
+ { 64, 100000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+
+static void SetFrequency(IMG_UINT32 ui32Frequency)
+{
+ PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
+}
+
+static void SetVoltage(IMG_UINT32 ui32Voltage)
+{
+ PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
+}
+
+#endif
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
+ .pfnGetRegionId = TCLocalGetRegionId,
+};
+
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsIonPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCIonCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCIonDevPAddrToCpuPAddr,
+ .pfnGetRegionId = TCIonGetRegionId,
+};
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* Default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+ 0, /* BIF tiling heap 1 x-stride */
+ 1, /* BIF tiling heap 2 x-stride */
+ 2, /* BIF tiling heap 3 x-stride */
+ 3 /* BIF tiling heap 4 x-stride */
+};
+
+typedef struct _SYS_DATA_ SYS_DATA;
+
+struct _SYS_DATA_
+{
+ struct platform_device *pdev;
+
+ struct apollo_rogue_platform_data *pdata;
+
+ struct resource *registers;
+
+#if defined(SUPPORT_ION)
+ struct ion_client *ion_client;
+ struct ion_handle *ion_rogue_allocation;
+#endif
+};
+
+#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s\tTCF Core Revision: %s\tTCF Core Target Build ID: %s\tPCI Version: %s\tMacro Version: %s"
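+/*
+ * Build a human-readable version string from the identification strings
+ * reported by the Apollo base driver. The buffer is allocated with
+ * OSAllocZMem() and later freed in DeviceConfigDestroy(); NULL is
+ * returned on failure.
+ */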
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+ int err;
+ char str_fpga_rev[12];
+ char str_tcf_core_rev[12];
+ char str_tcf_core_target_build_id[4];
+ char str_pci_ver[4];
+ char str_macro_ver[8];
+
+ IMG_CHAR *pszVersion;
+ IMG_UINT32 ui32StringLength;
+
+ err = apollo_sys_strings(psSysData->pdev->dev.parent,
+ str_fpga_rev, sizeof(str_fpga_rev),
+ str_tcf_core_rev, sizeof(str_tcf_core_rev),
+ str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id),
+ str_pci_ver, sizeof(str_pci_ver),
+ str_macro_ver, sizeof(str_macro_ver));
+ if (err)
+ {
+ return NULL;
+ }
+
+ ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING);
+ ui32StringLength += OSStringLength(str_fpga_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_target_build_id);
+ ui32StringLength += OSStringLength(str_pci_ver);
+ ui32StringLength += OSStringLength(str_macro_ver);
+
+ /* Create the version string */
+ pszVersion = OSAllocZMem(ui32StringLength * sizeof(IMG_CHAR));
+ if (pszVersion)
+ {
+ OSSNPrintf(&pszVersion[0], ui32StringLength,
+ SYSTEM_INFO_FORMAT_STRING,
+ str_fpga_rev,
+ str_tcf_core_rev,
+ str_tcf_core_target_build_id,
+ str_pci_ver,
+ str_macro_ver);
+ }
+
+ return pszVersion;
+}
+
+#if defined(SUPPORT_ION)
+static SYS_DATA *gpsIonPrivateData;
+
+PVRSRV_ERROR IonInit(void *pvPrivateData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYS_DATA *psSysData = pvPrivateData;
+ gpsIonPrivateData = psSysData;
+
+ psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME);
+ if (IS_ERR(psSysData->ion_client))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client)));
+ /* FIXME: Find a better matching error code */
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+ goto err_out;
+ }
+ /* Allocate the whole rogue ion heap and pass that to services to manage */
+ psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0);
+ if (IS_ERR(psSysData->ion_rogue_allocation))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation)));
+ /* FIXME: Find a better matching error code */
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+ goto err_destroy_client;
+ }
+
+ return PVRSRV_OK;
+err_destroy_client:
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+err_out:
+ return eError;
+}
+
+void IonDeinit(void)
+{
+ SYS_DATA *psSysData = gpsIonPrivateData;
+ ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation);
+ psSysData->ion_rogue_allocation = NULL;
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+ return gpsIonPrivateData->pdata->ion_device;
+}
+
+void IonDevRelease(struct ion_device *ion_device)
+{
+ PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device);
+}
+#endif /* defined(SUPPORT_ION) */
+
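+/*
+ * CPU <-> device physical address translation for the local (LMA) heap:
+ * device addresses are expressed as offsets from the start address of the
+ * heap's first region.
+ */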
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+ }
+ }
+}
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+ }
+ }
+}
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+ /* Return first region which is always valid */
+ return 0;
+}
+
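+/*
+ * CPU <-> device physical address translation for the DMABUF/ION heap:
+ * device addresses are expressed as offsets from the Apollo card memory
+ * base reported in the platform data.
+ */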
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psSysData->pdata->apollo_memory_base;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->apollo_memory_base;
+ }
+ }
+}
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psSysData->pdata->apollo_memory_base;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->apollo_memory_base;
+ }
+ }
+}
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+ /* Return first region which is always valid */
+ return 0;
+}
+
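+/*
+ * Create the physical heap configurations: a GENERAL LMA heap covering the
+ * Rogue heap region and a DMABUF LMA heap covering the PDP heap region,
+ * both taken from the platform data supplied by the Apollo base driver.
+ */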
+static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData,
+ void *pvPrivData,
+ PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+ IMG_UINT32 *puiPhysHeapCountOut)
+{
+ static IMG_UINT32 uiHeapIDBase = 0;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ PHYS_HEAP_REGION *psRegion;
+ PVRSRV_ERROR eError;
+
+ pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * PHYS_HEAP_IDX_COUNT);
+ if (!pasPhysHeaps)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRegion = OSAllocMem(sizeof(*psRegion));
+ if (!psRegion)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorFreePhysHeaps;
+ }
+
+ psRegion->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_memory_base;
+ psRegion->sCardBase.uiAddr = 0;
+ psRegion->uiSize = psSysData->pdata->rogue_heap_memory_size;
+
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID =
+ uiHeapIDBase + PHYS_HEAP_IDX_GENERAL;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_LMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "LMA";
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsLocalPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions = psRegion;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32NumOfRegions = 1;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].hPrivData = pvPrivData;
+
+ psRegion = OSAllocMem(sizeof(*psRegion));
+ if (!psRegion)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorGeneralPhysHeapDestroy;
+ }
+
+ psRegion->sStartAddr.uiAddr = psSysData->pdata->pdp_heap_memory_base;
+ psRegion->sCardBase.uiAddr = 0;
+ psRegion->uiSize = psSysData->pdata->pdp_heap_memory_size;
+
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32PhysHeapID =
+ uiHeapIDBase + PHYS_HEAP_IDX_DMABUF;
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].eType = PHYS_HEAP_TYPE_LMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pszPDumpMemspaceName = "LMA";
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].psMemFuncs = &gsIonPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pasRegions = psRegion;
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32NumOfRegions = 1;
+ pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].hPrivData = pvPrivData;
+
+ uiHeapIDBase += PHYS_HEAP_IDX_COUNT;
+
+ *ppasPhysHeapsOut = pasPhysHeaps;
+ *puiPhysHeapCountOut = PHYS_HEAP_IDX_COUNT;
+
+ return PVRSRV_OK;
+
+ErrorGeneralPhysHeapDestroy:
+ OSFreeMem(pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions);
+
+ErrorFreePhysHeaps:
+ OSFreeMem(pasPhysHeaps);
+ return eError;
+}
+
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps,
+ IMG_UINT32 uiPhysHeapCount)
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < uiPhysHeapCount; i++)
+ {
+ if (pasPhysHeaps[i].pasRegions)
+ {
+ OSFreeMem(pasPhysHeaps[i].pasRegions);
+ }
+ }
+
+ OSFreeMem(pasPhysHeaps);
+}
+
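+/*
+ * Allocate and populate the PVRSRV_DEVICE_CONFIG for the Apollo Rogue
+ * device: RGX timing information, register ranges, interrupt number, BIF
+ * tiling configuration and the physical heaps created by PhysHeapsCreate().
+ */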
+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiPhysHeapCount;
+ PVRSRV_ERROR eError;
+
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+ psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+ eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeDevConfig;
+ }
+
+ /* Setup RGX specific timing data */
+ psRGXTimingInfo->ui32CoreClockSpeed = apollo_core_clock_speed(&psSysData->pdev->dev) * 6;
+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Setup the device config */
+ psDevConfig->pvOSDevice = &psSysData->pdev->dev;
+ psDevConfig->pszName = "apollo";
+ psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+ psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+ psDevConfig->ui32RegsSize = resource_size(psSysData->registers);
+
+ psDevConfig->ui32IRQ = APOLLO_INTERRUPT_EXT;
+
+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+ psDevConfig->pasPhysHeaps = pasPhysHeaps;
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+ psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] =
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+ psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] =
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+ psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] =
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+
+ psDevConfig->eBIFTilingMode = geBIFTilingMode;
+ psDevConfig->pui32BIFTilingHeapConfigs = &gauiBIFTilingHeapXStrides[0];
+ psDevConfig->ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+ psDevConfig->hDevData = psRGXData;
+ psDevConfig->hSysData = psSysData;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+ /* Dummy DVFS configuration used purely for testing purposes */
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+#if defined(PVR_DVFS)
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ *ppsDevConfigOut = psDevConfig;
+
+ return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+ OSFreeMem(psDevConfig);
+ return eError;
+}
+
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if (psDevConfig->pszVersion)
+ {
+ OSFreeMem(psDevConfig->pszVersion);
+ }
+
+ PhysHeapsDestroy(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount);
+
+ OSFreeMem(psDevConfig);
+}
+
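+/*
+ * System device initialisation entry point: enables the Apollo device,
+ * reserves the "rogue-regs" register resource and returns a populated
+ * device configuration (plus optional ION setup) to the caller.
+ */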
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ SYS_DATA *psSysData;
+ resource_size_t uiRegistersSize;
+ PVRSRV_ERROR eError;
+ int err = 0;
+
+ PVR_ASSERT(pvOSDevice);
+
+ psSysData = OSAllocZMem(sizeof(*psSysData));
+ if (psSysData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psSysData->pdev = to_platform_device((struct device *)pvOSDevice);
+ psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+ err = apollo_enable(psSysData->pdev->dev.parent);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+ goto ErrFreeSysData;
+ }
+
+ psSysData->registers = platform_get_resource_byname(psSysData->pdev,
+ IORESOURCE_MEM,
+ "rogue-regs");
+ if (!psSysData->registers)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get Rogue register information",
+ __func__));
+ eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+ goto ErrorDevDisable;
+ }
+
+ /* Check the address range is large enough. */
+ uiRegistersSize = resource_size(psSysData->registers);
+ if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
+ __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+
+ eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+ goto ErrorDevDisable;
+ }
+
+ /* Reserve the address range */
+ if (!request_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers),
+ SYS_RGX_DEV_NAME))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Rogue register memory region not available", __func__));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+
+ goto ErrorDevDisable;
+ }
+
+ eError = DeviceConfigCreate(psSysData, &psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorReleaseMemRegion;
+ }
+
+#if defined(SUPPORT_ION)
+ eError = IonInit(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__));
+ goto ErrorDeviceConfigDestroy;
+ }
+#endif
+
+ *ppsDevConfig = psDevConfig;
+
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_ION)
+ErrorDeviceConfigDestroy:
+ DeviceConfigDestroy(psDevConfig);
+#endif
+ErrorReleaseMemRegion:
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ErrorDevDisable:
+ apollo_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+ OSFreeMem(psSysData);
+ return eError;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION)
+ IonDeinit();
+#endif
+
+ DeviceConfigDestroy(psDevConfig);
+
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ apollo_disable(psSysData->pdev->dev.parent);
+
+ OSFreeMem(psSysData);
+}
+
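+/*
+ * Dump system-level debug state (chip temperature and PLL status obtained
+ * via apollo_sys_info()) through the supplied debug dump interface.
+ */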
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+#if defined(TC_APOLLO_TCF5)
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+#else
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ u32 tmp = 0;
+ u32 pll;
+
+ PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------");
+
+ if (apollo_sys_info(psSysData->pdev->dev.parent, &tmp, &pll))
+ goto err_out;
+
+ if (tmp > 0)
+ PVR_DUMPDEBUG_LOG("Chip temperature: %u degrees C", tmp);
+ PVR_DUMPDEBUG_LOG("PLL status: %x", pll);
+
+err_out:
+ return eError;
+#endif
+}
+
+typedef struct
+{
+ struct device *psDev;
+ int iInterruptID;
+ void *pvData;
+ PFN_LISR pfnLISR;
+} LISR_DATA;
+
+static void ApolloInterruptHandler(void *pvData)
+{
+ LISR_DATA *psLISRData = pvData;
+ psLISRData->pfnLISR(psLISRData->pvData);
+}
+
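+/*
+ * Install a Services low-level ISR by registering ApolloInterruptHandler()
+ * with the Apollo base driver for the external (Rogue) interrupt and then
+ * enabling that interrupt.
+ */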
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+ LISR_DATA *psLISRData;
+ PVRSRV_ERROR eError;
+ int err;
+
+ if (ui32IRQ != APOLLO_INTERRUPT_EXT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ }
+
+ psLISRData = OSAllocZMem(sizeof(*psLISRData));
+ if (!psLISRData)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ psLISRData->pfnLISR = pfnLISR;
+ psLISRData->pvData = pvData;
+ psLISRData->iInterruptID = ui32IRQ;
+ psLISRData->psDev = psSysData->pdev->dev.parent;
+
+ err = apollo_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, ApolloInterruptHandler, psLISRData);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: apollo_set_interrupt_handler() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_free_data;
+ }
+
+ err = apollo_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: apollo_enable_interrupt() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_unset_interrupt_handler;
+ }
+
+ *phLISRData = psLISRData;
+ eError = PVRSRV_OK;
+
+ PVR_TRACE(("Installed device LISR %pf to irq %u", pfnLISR, ui32IRQ));
+
+err_out:
+ return eError;
+err_unset_interrupt_handler:
+ apollo_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+ OSFreeMem(psLISRData);
+ goto err_out;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+ int err;
+
+ err = apollo_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: apollo_disable_interrupt() failed (%d)", __func__, err));
+ }
+
+ err = apollo_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: apollo_set_interrupt_handler() failed (%d)", __func__, err));
+ }
+
+ PVR_TRACE(("Uninstalled device LISR %pf from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+ OSFreeMem(psLISRData);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/* System specific poll/timeout details */
+#if defined (VIRTUAL_PLATFORM)
+#define MAX_HW_TIME_US (240000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#define SYS_RGX_DEV_NAME "apollo_rogue"
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Test Chip Framework system control register definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Autogenerated C -- do not edit
+ Generated from: tcf_clk_ctrl.def
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_CLK_CTRL_H_)
+#define _TCF_CLK_CTRL_H_
+
+/*
+ Register FPGA_ID_REG
+*/
+#define TCF_CLK_CTRL_FPGA_ID_REG 0x0000
+#define FPGA_ID_REG_CORE_CFG_MASK 0x0000FFFFU
+#define FPGA_ID_REG_CORE_CFG_SHIFT 0
+#define FPGA_ID_REG_CORE_CFG_SIGNED 0
+
+#define FPGA_ID_REG_CORE_ID_MASK 0xFFFF0000U
+#define FPGA_ID_REG_CORE_ID_SHIFT 16
+#define FPGA_ID_REG_CORE_ID_SIGNED 0
+
+/*
+ Register FPGA_REV_REG
+*/
+#define TCF_CLK_CTRL_FPGA_REV_REG 0x0008
+#define FPGA_REV_REG_MAINT_MASK 0x000000FFU
+#define FPGA_REV_REG_MAINT_SHIFT 0
+#define FPGA_REV_REG_MAINT_SIGNED 0
+
+#define FPGA_REV_REG_MINOR_MASK 0x0000FF00U
+#define FPGA_REV_REG_MINOR_SHIFT 8
+#define FPGA_REV_REG_MINOR_SIGNED 0
+
+#define FPGA_REV_REG_MAJOR_MASK 0x00FF0000U
+#define FPGA_REV_REG_MAJOR_SHIFT 16
+#define FPGA_REV_REG_MAJOR_SIGNED 0
+
+#define FPGA_REV_REG_DESIGNER_MASK 0xFF000000U
+#define FPGA_REV_REG_DESIGNER_SHIFT 24
+#define FPGA_REV_REG_DESIGNER_SIGNED 0
+
+/*
+ Register FPGA_DES_REV_1
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_1 0x0010
+#define FPGA_DES_REV_1_MASK 0xFFFFFFFFU
+#define FPGA_DES_REV_1_SHIFT 0
+#define FPGA_DES_REV_1_SIGNED 0
+
+/*
+ Register FPGA_DES_REV_2
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_2 0x0018
+#define FPGA_DES_REV_2_MASK 0xFFFFFFFFU
+#define FPGA_DES_REV_2_SHIFT 0
+#define FPGA_DES_REV_2_SIGNED 0
+
+/*
+ Register TCF_CORE_ID_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_ID_REG 0x0020
+#define TCF_CORE_ID_REG_CORE_CFG_MASK 0x0000FFFFU
+#define TCF_CORE_ID_REG_CORE_CFG_SHIFT 0
+#define TCF_CORE_ID_REG_CORE_CFG_SIGNED 0
+
+#define TCF_CORE_ID_REG_CORE_ID_MASK 0xFFFF0000U
+#define TCF_CORE_ID_REG_CORE_ID_SHIFT 16
+#define TCF_CORE_ID_REG_CORE_ID_SIGNED 0
+
+/*
+ Register TCF_CORE_REV_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_REV_REG 0x0028
+#define TCF_CORE_REV_REG_MAINT_MASK 0x000000FFU
+#define TCF_CORE_REV_REG_MAINT_SHIFT 0
+#define TCF_CORE_REV_REG_MAINT_SIGNED 0
+
+#define TCF_CORE_REV_REG_MINOR_MASK 0x0000FF00U
+#define TCF_CORE_REV_REG_MINOR_SHIFT 8
+#define TCF_CORE_REV_REG_MINOR_SIGNED 0
+
+#define TCF_CORE_REV_REG_MAJOR_MASK 0x00FF0000U
+#define TCF_CORE_REV_REG_MAJOR_SHIFT 16
+#define TCF_CORE_REV_REG_MAJOR_SIGNED 0
+
+#define TCF_CORE_REV_REG_DESIGNER_MASK 0xFF000000U
+#define TCF_CORE_REV_REG_DESIGNER_SHIFT 24
+#define TCF_CORE_REV_REG_DESIGNER_SIGNED 0
+
+/*
+ Register TCF_CORE_DES_REV_1
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1 0x0030
+#define TCF_CORE_DES_REV_1_MASK 0xFFFFFFFFU
+#define TCF_CORE_DES_REV_1_SHIFT 0
+#define TCF_CORE_DES_REV_1_SIGNED 0
+
+/*
+ Register TCF_CORE_DES_REV_2
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2 0x0038
+#define TCF_CORE_DES_REV_2_MASK 0xFFFFFFFFU
+#define TCF_CORE_DES_REV_2_SHIFT 0
+#define TCF_CORE_DES_REV_2_SIGNED 0
+
+/*
+ Register SCB_GENERAL_CONTROL
+*/
+#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL 0x0040
+#define SCB_GC_TRANS_HALT_MASK 0x00000200U
+#define SCB_GC_TRANS_HALT_SHIFT 9
+#define SCB_GC_TRANS_HALT_SIGNED 0
+
+#define SCB_GC_CKD_REGS_MASK 0x00000100U
+#define SCB_GC_CKD_REGS_SHIFT 8
+#define SCB_GC_CKD_REGS_SIGNED 0
+
+#define SCB_GC_CKD_SLAVE_MASK 0x00000080U
+#define SCB_GC_CKD_SLAVE_SHIFT 7
+#define SCB_GC_CKD_SLAVE_SIGNED 0
+
+#define SCB_GC_CKD_MASTER_MASK 0x00000040U
+#define SCB_GC_CKD_MASTER_SHIFT 6
+#define SCB_GC_CKD_MASTER_SIGNED 0
+
+#define SCB_GC_CKD_XDATA_MASK 0x00000020U
+#define SCB_GC_CKD_XDATA_SHIFT 5
+#define SCB_GC_CKD_XDATA_SIGNED 0
+
+#define SCB_GC_SFR_REG_MASK 0x00000010U
+#define SCB_GC_SFR_REG_SHIFT 4
+#define SCB_GC_SFR_REG_SIGNED 0
+
+#define SCB_GC_SFR_SLAVE_MASK 0x00000008U
+#define SCB_GC_SFR_SLAVE_SHIFT 3
+#define SCB_GC_SFR_SLAVE_SIGNED 0
+
+#define SCB_GC_SFR_MASTER_MASK 0x00000004U
+#define SCB_GC_SFR_MASTER_SHIFT 2
+#define SCB_GC_SFR_MASTER_SIGNED 0
+
+#define SCB_GC_SFR_DET_DATA_MASK 0x00000002U
+#define SCB_GC_SFR_DET_DATA_SHIFT 1
+#define SCB_GC_SFR_DET_DATA_SIGNED 0
+
+#define SCB_GC_SFR_GEN_DATA_MASK 0x00000001U
+#define SCB_GC_SFR_GEN_DATA_SHIFT 0
+#define SCB_GC_SFR_GEN_DATA_SIGNED 0
+
+/*
+ Register SCB_MASTER_READ_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT 0x0048
+#define MASTER_READ_COUNT_MASK 0x0000FFFFU
+#define MASTER_READ_COUNT_SHIFT 0
+#define MASTER_READ_COUNT_SIGNED 0
+
+/*
+ Register SCB_MASTER_READ_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA 0x0050
+#define MASTER_READ_DATA_MASK 0x000000FFU
+#define MASTER_READ_DATA_SHIFT 0
+#define MASTER_READ_DATA_SIGNED 0
+
+/*
+ Register SCB_MASTER_ADDRESS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS 0x0058
+#define SCB_MASTER_ADDRESS_MASK 0x000003FFU
+#define SCB_MASTER_ADDRESS_SHIFT 0
+#define SCB_MASTER_ADDRESS_SIGNED 0
+
+/*
+ Register SCB_MASTER_WRITE_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA 0x0060
+#define MASTER_WRITE_DATA_MASK 0x000000FFU
+#define MASTER_WRITE_DATA_SHIFT 0
+#define MASTER_WRITE_DATA_SIGNED 0
+
+/*
+ Register SCB_MASTER_WRITE_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068
+#define MASTER_WRITE_COUNT_MASK 0x0000FFFFU
+#define MASTER_WRITE_COUNT_SHIFT 0
+#define MASTER_WRITE_COUNT_SIGNED 0
+
+/*
+ Register SCB_BUS_SELECT
+*/
+#define TCF_CLK_CTRL_SCB_BUS_SELECT 0x0070
+#define BUS_SELECT_MASK 0x00000003U
+#define BUS_SELECT_SHIFT 0
+#define BUS_SELECT_SIGNED 0
+
+/*
+ Register SCB_MASTER_FILL_STATUS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078
+#define MASTER_WRITE_FIFO_EMPTY_MASK 0x00000008U
+#define MASTER_WRITE_FIFO_EMPTY_SHIFT 3
+#define MASTER_WRITE_FIFO_EMPTY_SIGNED 0
+
+#define MASTER_WRITE_FIFO_FULL_MASK 0x00000004U
+#define MASTER_WRITE_FIFO_FULL_SHIFT 2
+#define MASTER_WRITE_FIFO_FULL_SIGNED 0
+
+#define MASTER_READ_FIFO_EMPTY_MASK 0x00000002U
+#define MASTER_READ_FIFO_EMPTY_SHIFT 1
+#define MASTER_READ_FIFO_EMPTY_SIGNED 0
+
+#define MASTER_READ_FIFO_FULL_MASK 0x00000001U
+#define MASTER_READ_FIFO_FULL_SHIFT 0
+#define MASTER_READ_FIFO_FULL_SIGNED 0
+
+/*
+ Register CLK_AND_RST_CTRL
+*/
+#define TCF_CLK_CTRL_CLK_AND_RST_CTRL 0x0080
+#define GLB_CLKG_EN_MASK 0x00020000U
+#define GLB_CLKG_EN_SHIFT 17
+#define GLB_CLKG_EN_SIGNED 0
+
+#define CLK_GATE_CNTL_MASK 0x00010000U
+#define CLK_GATE_CNTL_SHIFT 16
+#define CLK_GATE_CNTL_SIGNED 0
+
+#define DUT_DCM_RESETN_MASK 0x00000400U
+#define DUT_DCM_RESETN_SHIFT 10
+#define DUT_DCM_RESETN_SIGNED 0
+
+#define MEM_RESYNC_BYPASS_MASK 0x00000200U
+#define MEM_RESYNC_BYPASS_SHIFT 9
+#define MEM_RESYNC_BYPASS_SIGNED 0
+
+#define SYS_RESYNC_BYPASS_MASK 0x00000100U
+#define SYS_RESYNC_BYPASS_SHIFT 8
+#define SYS_RESYNC_BYPASS_SIGNED 0
+
+#define SCB_RESETN_MASK 0x00000010U
+#define SCB_RESETN_SHIFT 4
+#define SCB_RESETN_SIGNED 0
+
+#define PDP2_RESETN_MASK 0x00000008U
+#define PDP2_RESETN_SHIFT 3
+#define PDP2_RESETN_SIGNED 0
+
+#define PDP1_RESETN_MASK 0x00000004U
+#define PDP1_RESETN_SHIFT 2
+#define PDP1_RESETN_SIGNED 0
+
+#define DDR_RESETN_MASK 0x00000002U
+#define DDR_RESETN_SHIFT 1
+#define DDR_RESETN_SIGNED 0
+
+#define DUT_RESETN_MASK 0x00000001U
+#define DUT_RESETN_SHIFT 0
+#define DUT_RESETN_SIGNED 0
+
+/*
+ Register TEST_REG_OUT
+*/
+#define TCF_CLK_CTRL_TEST_REG_OUT 0x0088
+#define TEST_REG_OUT_MASK 0xFFFFFFFFU
+#define TEST_REG_OUT_SHIFT 0
+#define TEST_REG_OUT_SIGNED 0
+
+/*
+ Register TEST_REG_IN
+*/
+#define TCF_CLK_CTRL_TEST_REG_IN 0x0090
+#define TEST_REG_IN_MASK 0xFFFFFFFFU
+#define TEST_REG_IN_SHIFT 0
+#define TEST_REG_IN_SIGNED 0
+
+/*
+ Register TEST_CTRL
+*/
+#define TCF_CLK_CTRL_TEST_CTRL 0x0098
+#define PCI_TEST_OFFSET_MASK 0xF8000000U
+#define PCI_TEST_OFFSET_SHIFT 27
+#define PCI_TEST_OFFSET_SIGNED 0
+
+#define HOST_PHY_MODE_MASK 0x00000100U
+#define HOST_PHY_MODE_SHIFT 8
+#define HOST_PHY_MODE_SIGNED 0
+
+#define HOST_ONLY_MODE_MASK 0x00000080U
+#define HOST_ONLY_MODE_SHIFT 7
+#define HOST_ONLY_MODE_SIGNED 0
+
+#define PCI_TEST_MODE_MASK 0x00000040U
+#define PCI_TEST_MODE_SHIFT 6
+#define PCI_TEST_MODE_SIGNED 0
+
+#define TURN_OFF_DDR_MASK 0x00000020U
+#define TURN_OFF_DDR_SHIFT 5
+#define TURN_OFF_DDR_SIGNED 0
+
+#define SYS_RD_CLK_INV_MASK 0x00000010U
+#define SYS_RD_CLK_INV_SHIFT 4
+#define SYS_RD_CLK_INV_SIGNED 0
+
+#define MEM_REQ_CLK_INV_MASK 0x00000008U
+#define MEM_REQ_CLK_INV_SHIFT 3
+#define MEM_REQ_CLK_INV_SIGNED 0
+
+#define BURST_SPLIT_MASK 0x00000004U
+#define BURST_SPLIT_SHIFT 2
+#define BURST_SPLIT_SIGNED 0
+
+#define CLK_INVERSION_MASK 0x00000002U
+#define CLK_INVERSION_SHIFT 1
+#define CLK_INVERSION_SIGNED 0
+
+#define ADDRESS_FORCE_MASK 0x00000001U
+#define ADDRESS_FORCE_SHIFT 0
+#define ADDRESS_FORCE_SIGNED 0
+
+/*
+ Register CLEAR_HOST_MEM_SIG
+*/
+#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG 0x00A0
+#define SIGNATURE_TAG_ID_MASK 0x00000F00U
+#define SIGNATURE_TAG_ID_SHIFT 8
+#define SIGNATURE_TAG_ID_SIGNED 0
+
+#define CLEAR_HOST_MEM_SIGNATURE_MASK 0x00000001U
+#define CLEAR_HOST_MEM_SIGNATURE_SHIFT 0
+#define CLEAR_HOST_MEM_SIGNATURE_SIGNED 0
+
+/*
+ Register HOST_MEM_SIGNATURE
+*/
+#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE 0x00A8
+#define HOST_MEM_SIGNATURE_MASK 0xFFFFFFFFU
+#define HOST_MEM_SIGNATURE_SHIFT 0
+#define HOST_MEM_SIGNATURE_SIGNED 0
+
+/*
+ Register INTERRUPT_STATUS
+*/
+#define TCF_CLK_CTRL_INTERRUPT_STATUS 0x00C8
+#define INTERRUPT_MASTER_STATUS_MASK 0x80000000U
+#define INTERRUPT_MASTER_STATUS_SHIFT 31
+#define INTERRUPT_MASTER_STATUS_SIGNED 0
+
+#define OTHER_INTS_MASK 0x7FFE0000U
+#define OTHER_INTS_SHIFT 17
+#define OTHER_INTS_SIGNED 0
+
+#define HOST_MST_NORESPONSE_MASK 0x00010000U
+#define HOST_MST_NORESPONSE_SHIFT 16
+#define HOST_MST_NORESPONSE_SIGNED 0
+
+#define PDP2_INT_MASK 0x00008000U
+#define PDP2_INT_SHIFT 15
+#define PDP2_INT_SIGNED 0
+
+#define PDP1_INT_MASK 0x00004000U
+#define PDP1_INT_SHIFT 14
+#define PDP1_INT_SIGNED 0
+
+#define EXT_INT_MASK 0x00002000U
+#define EXT_INT_SHIFT 13
+#define EXT_INT_SIGNED 0
+
+#define SCB_MST_HLT_BIT_MASK 0x00001000U
+#define SCB_MST_HLT_BIT_SHIFT 12
+#define SCB_MST_HLT_BIT_SIGNED 0
+
+#define SCB_SLV_EVENT_MASK 0x00000800U
+#define SCB_SLV_EVENT_SHIFT 11
+#define SCB_SLV_EVENT_SIGNED 0
+
+#define SCB_TDONE_RX_MASK 0x00000400U
+#define SCB_TDONE_RX_SHIFT 10
+#define SCB_TDONE_RX_SIGNED 0
+
+#define SCB_SLV_WT_RD_DAT_MASK 0x00000200U
+#define SCB_SLV_WT_RD_DAT_SHIFT 9
+#define SCB_SLV_WT_RD_DAT_SIGNED 0
+
+#define SCB_SLV_WT_PRV_RD_MASK 0x00000100U
+#define SCB_SLV_WT_PRV_RD_SHIFT 8
+#define SCB_SLV_WT_PRV_RD_SIGNED 0
+
+#define SCB_SLV_WT_WR_DAT_MASK 0x00000080U
+#define SCB_SLV_WT_WR_DAT_SHIFT 7
+#define SCB_SLV_WT_WR_DAT_SIGNED 0
+
+#define SCB_MST_WT_RD_DAT_MASK 0x00000040U
+#define SCB_MST_WT_RD_DAT_SHIFT 6
+#define SCB_MST_WT_RD_DAT_SIGNED 0
+
+#define SCB_ADD_ACK_ERR_MASK 0x00000020U
+#define SCB_ADD_ACK_ERR_SHIFT 5
+#define SCB_ADD_ACK_ERR_SIGNED 0
+
+#define SCB_WR_ACK_ERR_MASK 0x00000010U
+#define SCB_WR_ACK_ERR_SHIFT 4
+#define SCB_WR_ACK_ERR_SIGNED 0
+
+#define SCB_SDAT_LO_TIM_MASK 0x00000008U
+#define SCB_SDAT_LO_TIM_SHIFT 3
+#define SCB_SDAT_LO_TIM_SIGNED 0
+
+#define SCB_SCLK_LO_TIM_MASK 0x00000004U
+#define SCB_SCLK_LO_TIM_SHIFT 2
+#define SCB_SCLK_LO_TIM_SIGNED 0
+
+#define SCB_UNEX_START_BIT_MASK 0x00000002U
+#define SCB_UNEX_START_BIT_SHIFT 1
+#define SCB_UNEX_START_BIT_SIGNED 0
+
+#define SCB_BUS_INACTIVE_MASK 0x00000001U
+#define SCB_BUS_INACTIVE_SHIFT 0
+#define SCB_BUS_INACTIVE_SIGNED 0
+
+/*
+ Register INTERRUPT_OP_CFG
+*/
+#define TCF_CLK_CTRL_INTERRUPT_OP_CFG 0x00D0
+#define PULSE_NLEVEL_MASK 0x80000000U
+#define PULSE_NLEVEL_SHIFT 31
+#define PULSE_NLEVEL_SIGNED 0
+
+#define INT_SENSE_MASK 0x40000000U
+#define INT_SENSE_SHIFT 30
+#define INT_SENSE_SIGNED 0
+
+#define INTERRUPT_DEST_MASK 0x0000000FU
+#define INTERRUPT_DEST_SHIFT 0
+#define INTERRUPT_DEST_SIGNED 0
+
+/*
+ Register INTERRUPT_ENABLE
+*/
+#define TCF_CLK_CTRL_INTERRUPT_ENABLE 0x00D8
+#define INTERRUPT_MASTER_ENABLE_MASK 0x80000000U
+#define INTERRUPT_MASTER_ENABLE_SHIFT 31
+#define INTERRUPT_MASTER_ENABLE_SIGNED 0
+
+#define INTERRUPT_ENABLE_MASK 0x7FFFFFFFU
+#define INTERRUPT_ENABLE_SHIFT 0
+#define INTERRUPT_ENABLE_SIGNED 0
+
+/*
+ Register INTERRUPT_CLEAR
+*/
+#define TCF_CLK_CTRL_INTERRUPT_CLEAR 0x00E0
+#define INTERRUPT_MASTER_CLEAR_MASK 0x80000000U
+#define INTERRUPT_MASTER_CLEAR_SHIFT 31
+#define INTERRUPT_MASTER_CLEAR_SIGNED 0
+
+#define INTERRUPT_CLEAR_MASK 0x7FFFFFFFU
+#define INTERRUPT_CLEAR_SHIFT 0
+#define INTERRUPT_CLEAR_SIGNED 0
+
+/*
+ Register YCC_RGB_CTRL
+*/
+#define TCF_CLK_CTRL_YCC_RGB_CTRL 0x00E8
+#define RGB_CTRL1_MASK 0x000001FFU
+#define RGB_CTRL1_SHIFT 0
+#define RGB_CTRL1_SIGNED 0
+
+#define RGB_CTRL2_MASK 0x01FF0000U
+#define RGB_CTRL2_SHIFT 16
+#define RGB_CTRL2_SIGNED 0
+
+/*
+ Register EXP_BRD_CTRL
+*/
+#define TCF_CLK_CTRL_EXP_BRD_CTRL 0x00F8
+#define PDP1_DATA_EN_MASK 0x00000003U
+#define PDP1_DATA_EN_SHIFT 0
+#define PDP1_DATA_EN_SIGNED 0
+
+#define PDP2_DATA_EN_MASK 0x00000030U
+#define PDP2_DATA_EN_SHIFT 4
+#define PDP2_DATA_EN_SIGNED 0
+
+#define EXP_BRD_OUTPUT_MASK 0xFFFFFF00U
+#define EXP_BRD_OUTPUT_SHIFT 8
+#define EXP_BRD_OUTPUT_SIGNED 0
+
+/*
+ Register HOSTIF_CONTROL
+*/
+#define TCF_CLK_CTRL_HOSTIF_CONTROL 0x0100
+#define HOSTIF_CTRL_MASK 0x000000FFU
+#define HOSTIF_CTRL_SHIFT 0
+#define HOSTIF_CTRL_SIGNED 0
+
+/*
+ Register DUT_CONTROL_1
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_1 0x0108
+#define DUT_CTRL_1_MASK 0xFFFFFFFFU
+#define DUT_CTRL_1_SHIFT 0
+#define DUT_CTRL_1_SIGNED 0
+
+/* TC ES2 additionally needs these: */
+#define DUT_CTRL_TEST_MODE_SHIFT 0
+#define DUT_CTRL_TEST_MODE_MASK 0x3
+
+#define DUT_CTRL_VCC_0V9EN (1<<12)
+#define DUT_CTRL_VCC_1V8EN (1<<13)
+#define DUT_CTRL_VCC_IO_INH (1<<14)
+#define DUT_CTRL_VCC_CORE_INH (1<<15)
+
+/*
+ Register DUT_STATUS_1
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_1 0x0110
+#define DUT_STATUS_1_MASK 0xFFFFFFFFU
+#define DUT_STATUS_1_SHIFT 0
+#define DUT_STATUS_1_SIGNED 0
+
+/*
+ Register DUT_CTRL_NOT_STAT_1
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1 0x0118
+#define DUT_STAT_NOT_CTRL_1_MASK 0xFFFFFFFFU
+#define DUT_STAT_NOT_CTRL_1_SHIFT 0
+#define DUT_STAT_NOT_CTRL_1_SIGNED 0
+
+/*
+ Register DUT_CONTROL_2
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_2 0x0120
+#define DUT_CTRL_2_MASK 0xFFFFFFFFU
+#define DUT_CTRL_2_SHIFT 0
+#define DUT_CTRL_2_SIGNED 0
+
+/*
+ Register DUT_STATUS_2
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_2 0x0128
+#define DUT_STATUS_2_MASK 0xFFFFFFFFU
+#define DUT_STATUS_2_SHIFT 0
+#define DUT_STATUS_2_SIGNED 0
+
+/*
+ Register DUT_CTRL_NOT_STAT_2
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2 0x0130
+#define DUT_CTRL_NOT_STAT_2_MASK 0xFFFFFFFFU
+#define DUT_CTRL_NOT_STAT_2_SHIFT 0
+#define DUT_CTRL_NOT_STAT_2_SIGNED 0
+
+/*
+ Register BUS_CAP_BASE_ADDR
+*/
+#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR 0x0138
+#define BUS_CAP_BASE_ADDR_MASK 0xFFFFFFFFU
+#define BUS_CAP_BASE_ADDR_SHIFT 0
+#define BUS_CAP_BASE_ADDR_SIGNED 0
+
+/*
+ Register BUS_CAP_ENABLE
+*/
+#define TCF_CLK_CTRL_BUS_CAP_ENABLE 0x0140
+#define BUS_CAP_ENABLE_MASK 0x00000001U
+#define BUS_CAP_ENABLE_SHIFT 0
+#define BUS_CAP_ENABLE_SIGNED 0
+
+/*
+ Register BUS_CAP_COUNT
+*/
+#define TCF_CLK_CTRL_BUS_CAP_COUNT 0x0148
+#define BUS_CAP_COUNT_MASK 0xFFFFFFFFU
+#define BUS_CAP_COUNT_SHIFT 0
+#define BUS_CAP_COUNT_SIGNED 0
+
+/*
+ Register DCM_LOCK_STATUS
+*/
+#define TCF_CLK_CTRL_DCM_LOCK_STATUS 0x0150
+#define DCM_LOCK_STATUS_MASK 0x00000007U
+#define DCM_LOCK_STATUS_SHIFT 0
+#define DCM_LOCK_STATUS_SIGNED 0
+
+/*
+ Register AUX_DUT_RESETNS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETNS 0x0158
+#define AUX_DUT_RESETNS_MASK 0x0000000FU
+#define AUX_DUT_RESETNS_SHIFT 0
+#define AUX_DUT_RESETNS_SIGNED 0
+
+/*
+ Register TCF_SPI_MST_ADDR_RDNWR
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160
+#define TCF_SPI_MST_ADDR_MASK 0x00000FFFU
+#define TCF_SPI_MST_ADDR_SHIFT 0
+#define TCF_SPI_MST_ADDR_SIGNED 0
+
+#define TCF_SPI_MST_RDNWR_MASK 0x00001000U
+#define TCF_SPI_MST_RDNWR_SHIFT 12
+#define TCF_SPI_MST_RDNWR_SIGNED 0
+
+#define TCF_SPI_MST_SLAVE_ID_MASK 0x00010000U
+#define TCF_SPI_MST_SLAVE_ID_SHIFT 16
+#define TCF_SPI_MST_SLAVE_ID_SIGNED 0
+
+/*
+ Register TCF_SPI_MST_WDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA 0x0168
+#define TCF_SPI_MST_WDATA_MASK 0xFFFFFFFFU
+#define TCF_SPI_MST_WDATA_SHIFT 0
+#define TCF_SPI_MST_WDATA_SIGNED 0
+
+/*
+ Register TCF_SPI_MST_RDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA 0x0170
+#define TCF_SPI_MST_RDATA_MASK 0xFFFFFFFFU
+#define TCF_SPI_MST_RDATA_SHIFT 0
+#define TCF_SPI_MST_RDATA_SIGNED 0
+
+/*
+ Register TCF_SPI_MST_STATUS
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS 0x0178
+#define TCF_SPI_MST_STATUS_MASK 0x0000000FU
+#define TCF_SPI_MST_STATUS_SHIFT 0
+#define TCF_SPI_MST_STATUS_SIGNED 0
+
+/*
+ Register TCF_SPI_MST_GO
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_GO 0x0180
+#define TCF_SPI_MST_GO_MASK 0x00000001U
+#define TCF_SPI_MST_GO_SHIFT 0
+#define TCF_SPI_MST_GO_SIGNED 0
+
+/*
+ Register EXT_SIG_CTRL
+*/
+#define TCF_CLK_CTRL_EXT_SIG_CTRL 0x0188
+#define EXT_SYS_REQ_SIG_START_MASK 0x00000001U
+#define EXT_SYS_REQ_SIG_START_SHIFT 0
+#define EXT_SYS_REQ_SIG_START_SIGNED 0
+
+#define EXT_SYS_RD_SIG_START_MASK 0x00000002U
+#define EXT_SYS_RD_SIG_START_SHIFT 1
+#define EXT_SYS_RD_SIG_START_SIGNED 0
+
+#define EXT_MEM_REQ_SIG_START_MASK 0x00000004U
+#define EXT_MEM_REQ_SIG_START_SHIFT 2
+#define EXT_MEM_REQ_SIG_START_SIGNED 0
+
+#define EXT_MEM_RD_SIG_START_MASK 0x00000008U
+#define EXT_MEM_RD_SIG_START_SHIFT 3
+#define EXT_MEM_RD_SIG_START_SIGNED 0
+
+/*
+ Register EXT_SYS_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG 0x0190
+#define EXT_SYS_REQ_SIG_MASK 0xFFFFFFFFU
+#define EXT_SYS_REQ_SIG_SHIFT 0
+#define EXT_SYS_REQ_SIG_SIGNED 0
+
+/*
+ Register EXT_SYS_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_SIG 0x0198
+#define EXT_SYS_RD_SIG_MASK 0xFFFFFFFFU
+#define EXT_SYS_RD_SIG_SHIFT 0
+#define EXT_SYS_RD_SIG_SIGNED 0
+
+/*
+ Register EXT_MEM_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG 0x01A0
+#define EXT_MEM_REQ_SIG_MASK 0xFFFFFFFFU
+#define EXT_MEM_REQ_SIG_SHIFT 0
+#define EXT_MEM_REQ_SIG_SIGNED 0
+
+/*
+ Register EXT_MEM_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_SIG 0x01A8
+#define EXT_MEM_RD_SIG_MASK 0xFFFFFFFFU
+#define EXT_MEM_RD_SIG_SHIFT 0
+#define EXT_MEM_RD_SIG_SIGNED 0
+
+/*
+ Register EXT_SYS_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT 0x01B0
+#define EXT_SYS_REQ_WR_CNT_MASK 0xFFFFFFFFU
+#define EXT_SYS_REQ_WR_CNT_SHIFT 0
+#define EXT_SYS_REQ_WR_CNT_SIGNED 0
+
+/*
+ Register EXT_SYS_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT 0x01B8
+#define EXT_SYS_REQ_RD_CNT_MASK 0xFFFFFFFFU
+#define EXT_SYS_REQ_RD_CNT_SHIFT 0
+#define EXT_SYS_REQ_RD_CNT_SIGNED 0
+
+/*
+ Register EXT_SYS_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_CNT 0x01C0
+#define EXT_SYS_RD_CNT_MASK 0xFFFFFFFFU
+#define EXT_SYS_RD_CNT_SHIFT 0
+#define EXT_SYS_RD_CNT_SIGNED 0
+
+/*
+ Register EXT_MEM_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT 0x01C8
+#define EXT_MEM_REQ_WR_CNT_MASK 0xFFFFFFFFU
+#define EXT_MEM_REQ_WR_CNT_SHIFT 0
+#define EXT_MEM_REQ_WR_CNT_SIGNED 0
+
+/*
+ Register EXT_MEM_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT 0x01D0
+#define EXT_MEM_REQ_RD_CNT_MASK 0xFFFFFFFFU
+#define EXT_MEM_REQ_RD_CNT_SHIFT 0
+#define EXT_MEM_REQ_RD_CNT_SIGNED 0
+
+/*
+ Register EXT_MEM_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_CNT 0x01D8
+#define EXT_MEM_RD_CNT_MASK 0xFFFFFFFFU
+#define EXT_MEM_RD_CNT_SHIFT 0
+#define EXT_MEM_RD_CNT_SIGNED 0
+
+/*
+ Register TCF_CORE_TARGET_BUILD_CFG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0
+#define TCF_CORE_TARGET_BUILD_ID_MASK 0x000000FFU
+#define TCF_CORE_TARGET_BUILD_ID_SHIFT 0
+#define TCF_CORE_TARGET_BUILD_ID_SIGNED 0
+
+/*
+ Register MEM_THROUGH_SYS
+*/
+#define TCF_CLK_CTRL_MEM_THROUGH_SYS 0x01E8
+#define MEM_THROUGH_SYS_MASK 0x00000001U
+#define MEM_THROUGH_SYS_SHIFT 0
+#define MEM_THROUGH_SYS_SIGNED 0
+
+/*
+ Register HOST_PHY_OFFSET
+*/
+#define TCF_CLK_CTRL_HOST_PHY_OFFSET 0x01F0
+#define HOST_PHY_OFFSET_MASK 0xFFFFFFFFU
+#define HOST_PHY_OFFSET_SHIFT 0
+#define HOST_PHY_OFFSET_SIGNED 0
+
+#endif /* !defined(_TCF_CLK_CTRL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_clk_ctrl.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title Test Chip Framework PLL register definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Autogenerated C -- do not edit
+ Generated from tcf_pll.def
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_PLL_H_)
+#define _TCF_PLL_H_
+
+/*
+ Register PLL_DDR2_CLK0
+*/
+#define TCF_PLL_PLL_DDR2_CLK0 0x0000
+#define DDR2_PLL_CLK0_PHS_MASK 0x00300000U
+#define DDR2_PLL_CLK0_PHS_SHIFT 20
+#define DDR2_PLL_CLK0_PHS_SIGNED 0
+
+#define DDR2_PLL_CLK0_MS_MASK 0x00030000U
+#define DDR2_PLL_CLK0_MS_SHIFT 16
+#define DDR2_PLL_CLK0_MS_SIGNED 0
+
+#define DDR2_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define DDR2_PLL_CLK0_FREQ_SHIFT 0
+#define DDR2_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_DDR2_CLK1TO5
+*/
+#define TCF_PLL_PLL_DDR2_CLK1TO5 0x0008
+#define DDR2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define DDR2_PLL_CLK1TO5_PHS_SHIFT 20
+#define DDR2_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define DDR2_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define DDR2_PLL_CLK1TO5_MS_SHIFT 10
+#define DDR2_PLL_CLK1TO5_MS_SIGNED 0
+
+#define DDR2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define DDR2_PLL_CLK1TO5_FREQ_SHIFT 0
+#define DDR2_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_DDR2_DRP_GO
+*/
+#define TCF_PLL_PLL_DDR2_DRP_GO 0x0010
+#define PLL_DDR2_DRP_GO_MASK 0x00000001U
+#define PLL_DDR2_DRP_GO_SHIFT 0
+#define PLL_DDR2_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_PDP_CLK0
+*/
+#define TCF_PLL_PLL_PDP_CLK0 0x0018
+#define PDP_PLL_CLK0_PHS_MASK 0x00300000U
+#define PDP_PLL_CLK0_PHS_SHIFT 20
+#define PDP_PLL_CLK0_PHS_SIGNED 0
+
+#define PDP_PLL_CLK0_MS_MASK 0x00030000U
+#define PDP_PLL_CLK0_MS_SHIFT 16
+#define PDP_PLL_CLK0_MS_SIGNED 0
+
+#define PDP_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define PDP_PLL_CLK0_FREQ_SHIFT 0
+#define PDP_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_PDP_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP_CLK1TO5 0x0020
+#define PDP_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define PDP_PLL_CLK1TO5_PHS_SHIFT 20
+#define PDP_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define PDP_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define PDP_PLL_CLK1TO5_MS_SHIFT 10
+#define PDP_PLL_CLK1TO5_MS_SIGNED 0
+
+#define PDP_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define PDP_PLL_CLK1TO5_FREQ_SHIFT 0
+#define PDP_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_PDP_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP_DRP_GO 0x0028
+#define PLL_PDP_DRP_GO_MASK 0x00000001U
+#define PLL_PDP_DRP_GO_SHIFT 0
+#define PLL_PDP_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_PDP2_CLK0
+*/
+#define TCF_PLL_PLL_PDP2_CLK0 0x0030
+#define PDP2_PLL_CLK0_PHS_MASK 0x00300000U
+#define PDP2_PLL_CLK0_PHS_SHIFT 20
+#define PDP2_PLL_CLK0_PHS_SIGNED 0
+
+#define PDP2_PLL_CLK0_MS_MASK 0x00030000U
+#define PDP2_PLL_CLK0_MS_SHIFT 16
+#define PDP2_PLL_CLK0_MS_SIGNED 0
+
+#define PDP2_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define PDP2_PLL_CLK0_FREQ_SHIFT 0
+#define PDP2_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_PDP2_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP2_CLK1TO5 0x0038
+#define PDP2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define PDP2_PLL_CLK1TO5_PHS_SHIFT 20
+#define PDP2_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define PDP2_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define PDP2_PLL_CLK1TO5_MS_SHIFT 10
+#define PDP2_PLL_CLK1TO5_MS_SIGNED 0
+
+#define PDP2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define PDP2_PLL_CLK1TO5_FREQ_SHIFT 0
+#define PDP2_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_PDP2_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP2_DRP_GO 0x0040
+#define PLL_PDP2_DRP_GO_MASK 0x00000001U
+#define PLL_PDP2_DRP_GO_SHIFT 0
+#define PLL_PDP2_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_CORE_CLK0
+*/
+#define TCF_PLL_PLL_CORE_CLK0 0x0048
+#define CORE_PLL_CLK0_PHS_MASK 0x00300000U
+#define CORE_PLL_CLK0_PHS_SHIFT 20
+#define CORE_PLL_CLK0_PHS_SIGNED 0
+
+#define CORE_PLL_CLK0_MS_MASK 0x00030000U
+#define CORE_PLL_CLK0_MS_SHIFT 16
+#define CORE_PLL_CLK0_MS_SIGNED 0
+
+#define CORE_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define CORE_PLL_CLK0_FREQ_SHIFT 0
+#define CORE_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_CORE_CLK1TO5
+*/
+#define TCF_PLL_PLL_CORE_CLK1TO5 0x0050
+#define CORE_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define CORE_PLL_CLK1TO5_PHS_SHIFT 20
+#define CORE_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define CORE_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define CORE_PLL_CLK1TO5_MS_SHIFT 10
+#define CORE_PLL_CLK1TO5_MS_SIGNED 0
+
+#define CORE_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define CORE_PLL_CLK1TO5_FREQ_SHIFT 0
+#define CORE_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_CORE_DRP_GO
+*/
+#define TCF_PLL_PLL_CORE_DRP_GO 0x0058
+#define PLL_CORE_DRP_GO_MASK 0x00000001U
+#define PLL_CORE_DRP_GO_SHIFT 0
+#define PLL_CORE_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_SYSIF_CLK0
+*/
+#define TCF_PLL_PLL_SYSIF_CLK0 0x0060
+#define SYSIF_PLL_CLK0_PHS_MASK 0x00300000U
+#define SYSIF_PLL_CLK0_PHS_SHIFT 20
+#define SYSIF_PLL_CLK0_PHS_SIGNED 0
+
+#define SYSIF_PLL_CLK0_MS_MASK 0x00030000U
+#define SYSIF_PLL_CLK0_MS_SHIFT 16
+#define SYSIF_PLL_CLK0_MS_SIGNED 0
+
+#define SYSIF_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define SYSIF_PLL_CLK0_FREQ_SHIFT 0
+#define SYSIF_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_SYSIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_SYSIF_CLK1TO5 0x0068
+#define SYSIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define SYSIF_PLL_CLK1TO5_PHS_SHIFT 20
+#define SYSIF_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define SYSIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define SYSIF_PLL_CLK1TO5_MS_SHIFT 10
+#define SYSIF_PLL_CLK1TO5_MS_SIGNED 0
+
+#define SYSIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT 0
+#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_SYS_DRP_GO
+*/
+#define TCF_PLL_PLL_SYS_DRP_GO 0x0070
+#define PLL_SYS_DRP_GO_MASK 0x00000001U
+#define PLL_SYS_DRP_GO_SHIFT 0
+#define PLL_SYS_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_MEMIF_CLK0
+*/
+#define TCF_PLL_PLL_MEMIF_CLK0 0x0078
+#define MEMIF_PLL_CLK0_PHS_MASK 0x00300000U
+#define MEMIF_PLL_CLK0_PHS_SHIFT 20
+#define MEMIF_PLL_CLK0_PHS_SIGNED 0
+
+#define MEMIF_PLL_CLK0_MS_MASK 0x00030000U
+#define MEMIF_PLL_CLK0_MS_SHIFT 16
+#define MEMIF_PLL_CLK0_MS_SIGNED 0
+
+#define MEMIF_PLL_CLK0_FREQ_MASK 0x000001FFU
+#define MEMIF_PLL_CLK0_FREQ_SHIFT 0
+#define MEMIF_PLL_CLK0_FREQ_SIGNED 0
+
+/*
+ Register PLL_MEMIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_MEMIF_CLK1TO5 0x0080
+#define MEMIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U
+#define MEMIF_PLL_CLK1TO5_PHS_SHIFT 20
+#define MEMIF_PLL_CLK1TO5_PHS_SIGNED 0
+
+#define MEMIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U
+#define MEMIF_PLL_CLK1TO5_MS_SHIFT 10
+#define MEMIF_PLL_CLK1TO5_MS_SIGNED 0
+
+#define MEMIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU
+#define MEMIF_PLL_CLK1TO5_FREQ_SHIFT 0
+#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED 0
+
+/*
+ Register PLL_MEM_DRP_GO
+*/
+#define TCF_PLL_PLL_MEM_DRP_GO 0x0088
+#define PLL_MEM_DRP_GO_MASK 0x00000001U
+#define PLL_MEM_DRP_GO_SHIFT 0
+#define PLL_MEM_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_ALL_DRP_GO
+*/
+#define TCF_PLL_PLL_ALL_DRP_GO 0x0090
+#define PLL_ALL_DRP_GO_MASK 0x00000001U
+#define PLL_ALL_DRP_GO_SHIFT 0
+#define PLL_ALL_DRP_GO_SIGNED 0
+
+/*
+ Register PLL_DRP_STATUS
+*/
+#define TCF_PLL_PLL_DRP_STATUS 0x0098
+#define PLL_LOCKS_MASK 0x00003F00U
+#define PLL_LOCKS_SHIFT 8
+#define PLL_LOCKS_SIGNED 0
+
+#define PLL_DRP_GOOD_MASK 0x0000003FU
+#define PLL_DRP_GOOD_SHIFT 0
+#define PLL_DRP_GOOD_SIGNED 0
+
+#endif /* !defined(_TCF_PLL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_pll.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title Test Chip Framework PDP register definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Autogenerated C -- do not edit
+ Generated from: tcf_rgbpdp_regs.def
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_RGBPDP_REGS_H_)
+#define _TCF_RGBPDP_REGS_H_
+
+/*
+ Register PVR_TCF_RGBPDP_STR1SURF
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF 0x0000
+#define STR1HEIGHT_MASK 0x000007FFU
+#define STR1HEIGHT_SHIFT 0
+#define STR1HEIGHT_SIGNED 0
+
+#define STR1WIDTH_MASK 0x003FF800U
+#define STR1WIDTH_SHIFT 11
+#define STR1WIDTH_SIGNED 0
+
+#define STR1PIXFMT_MASK 0x0F000000U
+#define STR1PIXFMT_SHIFT 24
+#define STR1PIXFMT_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_STR1ADDRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004
+#define STR1BASE_MASK 0x03FFFFFFU
+#define STR1BASE_SHIFT 0
+#define STR1BASE_SIGNED 0
+
+#define STR1INTFIELD_MASK 0x40000000U
+#define STR1INTFIELD_SHIFT 30
+#define STR1INTFIELD_SIGNED 0
+
+#define STR1STREN_MASK 0x80000000U
+#define STR1STREN_SHIFT 31
+#define STR1STREN_SIGNED 0
+
+/*
+ Register PVR_PDP_STR1POSN
+*/
+#define TCF_RGBPDP_PVR_PDP_STR1POSN 0x0008
+#define STR1STRIDE_MASK 0x000003FFU
+#define STR1STRIDE_SHIFT 0
+#define STR1STRIDE_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_MEMCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL 0x000C
+#define MEMREFRESH_MASK 0xC0000000U
+#define MEMREFRESH_SHIFT 30
+#define MEMREFRESH_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_STRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL 0x0010
+#define BURSTLEN_GFX_MASK 0x000000FFU
+#define BURSTLEN_GFX_SHIFT 0
+#define BURSTLEN_GFX_SIGNED 0
+
+#define THRESHOLD_GFX_MASK 0x0000FF00U
+#define THRESHOLD_GFX_SHIFT 8
+#define THRESHOLD_GFX_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_SYNCCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL 0x0014
+#define HSDIS_MASK 0x00000001U
+#define HSDIS_SHIFT 0
+#define HSDIS_SIGNED 0
+
+#define HSPOL_MASK 0x00000002U
+#define HSPOL_SHIFT 1
+#define HSPOL_SIGNED 0
+
+#define VSDIS_MASK 0x00000004U
+#define VSDIS_SHIFT 2
+#define VSDIS_SIGNED 0
+
+#define VSPOL_MASK 0x00000008U
+#define VSPOL_SHIFT 3
+#define VSPOL_SIGNED 0
+
+#define BLNKDIS_MASK 0x00000010U
+#define BLNKDIS_SHIFT 4
+#define BLNKDIS_SIGNED 0
+
+#define BLNKPOL_MASK 0x00000020U
+#define BLNKPOL_SHIFT 5
+#define BLNKPOL_SIGNED 0
+
+#define HS_SLAVE_MASK 0x00000040U
+#define HS_SLAVE_SHIFT 6
+#define HS_SLAVE_SIGNED 0
+
+#define VS_SLAVE_MASK 0x00000080U
+#define VS_SLAVE_SHIFT 7
+#define VS_SLAVE_SIGNED 0
+
+#define INTERLACE_MASK 0x00000100U
+#define INTERLACE_SHIFT 8
+#define INTERLACE_SIGNED 0
+
+#define FIELDPOL_MASK 0x00000200U
+#define FIELDPOL_SHIFT 9
+#define FIELDPOL_SIGNED 0
+
+#define CLKPOL_MASK 0x00000800U
+#define CLKPOL_SHIFT 11
+#define CLKPOL_SIGNED 0
+
+#define CSYNC_EN_MASK 0x00001000U
+#define CSYNC_EN_SHIFT 12
+#define CSYNC_EN_SIGNED 0
+
+#define FIELD_EN_MASK 0x00002000U
+#define FIELD_EN_SHIFT 13
+#define FIELD_EN_SIGNED 0
+
+#define UPDWAIT_MASK 0x000F0000U
+#define UPDWAIT_SHIFT 16
+#define UPDWAIT_SIGNED 0
+
+#define UPDCTRL_MASK 0x01000000U
+#define UPDCTRL_SHIFT 24
+#define UPDCTRL_SIGNED 0
+
+#define UPDINTCTRL_MASK 0x02000000U
+#define UPDINTCTRL_SHIFT 25
+#define UPDINTCTRL_SIGNED 0
+
+#define UPDSYNCTRL_MASK 0x04000000U
+#define UPDSYNCTRL_SHIFT 26
+#define UPDSYNCTRL_SIGNED 0
+
+#define POWERDN_MASK 0x10000000U
+#define POWERDN_SHIFT 28
+#define POWERDN_SIGNED 0
+
+#define DISP_RST_MASK 0x20000000U
+#define DISP_RST_SHIFT 29
+#define DISP_RST_SIGNED 0
+
+#define SYNCACTIVE_MASK 0x80000000U
+#define SYNCACTIVE_SHIFT 31
+#define SYNCACTIVE_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_BORDCOL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL 0x0018
+#define BORDCOL_MASK 0x00FFFFFFU
+#define BORDCOL_SHIFT 0
+#define BORDCOL_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_UPDCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL 0x001C
+#define UPDFIELD_MASK 0x00000001U
+#define UPDFIELD_SHIFT 0
+#define UPDFIELD_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_HSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1 0x0020
+#define HT_MASK 0x00000FFFU
+#define HT_SHIFT 0
+#define HT_SIGNED 0
+
+#define HBPS_MASK 0x0FFF0000U
+#define HBPS_SHIFT 16
+#define HBPS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_HSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2 0x0024
+#define HLBS_MASK 0x00000FFFU
+#define HLBS_SHIFT 0
+#define HLBS_SIGNED 0
+
+#define HAS_MASK 0x0FFF0000U
+#define HAS_SHIFT 16
+#define HAS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_HSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3 0x0028
+#define HRBS_MASK 0x00000FFFU
+#define HRBS_SHIFT 0
+#define HRBS_SIGNED 0
+
+#define HFPS_MASK 0x0FFF0000U
+#define HFPS_SHIFT 16
+#define HFPS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_VSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1 0x002C
+#define VT_MASK 0x00000FFFU
+#define VT_SHIFT 0
+#define VT_SIGNED 0
+
+#define VBPS_MASK 0x0FFF0000U
+#define VBPS_SHIFT 16
+#define VBPS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_VSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2 0x0030
+#define VTBS_MASK 0x00000FFFU
+#define VTBS_SHIFT 0
+#define VTBS_SIGNED 0
+
+#define VAS_MASK 0x0FFF0000U
+#define VAS_SHIFT 16
+#define VAS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_VSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3 0x0034
+#define VBBS_MASK 0x00000FFFU
+#define VBBS_SHIFT 0
+#define VBBS_SIGNED 0
+
+#define VFPS_MASK 0x0FFF0000U
+#define VFPS_SHIFT 16
+#define VFPS_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_HDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL 0x0038
+#define HDEF_MASK 0x00000FFFU
+#define HDEF_SHIFT 0
+#define HDEF_SIGNED 0
+
+#define HDES_MASK 0x0FFF0000U
+#define HDES_SHIFT 16
+#define HDES_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_VDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL 0x003C
+#define VDEF_MASK 0x00000FFFU
+#define VDEF_SHIFT 0
+#define VDEF_SIGNED 0
+
+#define VDES_MASK 0x0FFF0000U
+#define VDES_SHIFT 16
+#define VDES_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_VEVENT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT 0x0040
+#define VFETCH_MASK 0x00000FFFU
+#define VFETCH_SHIFT 0
+#define VFETCH_SIGNED 0
+
+#define VEVENT_MASK 0x0FFF0000U
+#define VEVENT_SHIFT 16
+#define VEVENT_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_OPMASK
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK 0x0044
+#define MASKR_MASK 0x000000FFU
+#define MASKR_SHIFT 0
+#define MASKR_SIGNED 0
+
+#define MASKG_MASK 0x0000FF00U
+#define MASKG_SHIFT 8
+#define MASKG_SIGNED 0
+
+#define MASKB_MASK 0x00FF0000U
+#define MASKB_SHIFT 16
+#define MASKB_SIGNED 0
+
+#define BLANKLEVEL_MASK 0x40000000U
+#define BLANKLEVEL_SHIFT 30
+#define BLANKLEVEL_SIGNED 0
+
+#define MASKLEVEL_MASK 0x80000000U
+#define MASKLEVEL_SHIFT 31
+#define MASKLEVEL_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_INTSTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT 0x0048
+#define INTS_HBLNK0_MASK 0x00000001U
+#define INTS_HBLNK0_SHIFT 0
+#define INTS_HBLNK0_SIGNED 0
+
+#define INTS_HBLNK1_MASK 0x00000002U
+#define INTS_HBLNK1_SHIFT 1
+#define INTS_HBLNK1_SIGNED 0
+
+#define INTS_VBLNK0_MASK 0x00000004U
+#define INTS_VBLNK0_SHIFT 2
+#define INTS_VBLNK0_SIGNED 0
+
+#define INTS_VBLNK1_MASK 0x00000008U
+#define INTS_VBLNK1_SHIFT 3
+#define INTS_VBLNK1_SIGNED 0
+
+#define INTS_STR1URUN_MASK 0x00000010U
+#define INTS_STR1URUN_SHIFT 4
+#define INTS_STR1URUN_SIGNED 0
+
+#define INTS_STR1ORUN_MASK 0x00000020U
+#define INTS_STR1ORUN_SHIFT 5
+#define INTS_STR1ORUN_SIGNED 0
+
+#define INTS_DISPURUN_MASK 0x00000040U
+#define INTS_DISPURUN_SHIFT 6
+#define INTS_DISPURUN_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_INTENAB
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB 0x004C
+#define INTEN_HBLNK0_MASK 0x00000001U
+#define INTEN_HBLNK0_SHIFT 0
+#define INTEN_HBLNK0_SIGNED 0
+
+#define INTEN_HBLNK1_MASK 0x00000002U
+#define INTEN_HBLNK1_SHIFT 1
+#define INTEN_HBLNK1_SIGNED 0
+
+#define INTEN_VBLNK0_MASK 0x00000004U
+#define INTEN_VBLNK0_SHIFT 2
+#define INTEN_VBLNK0_SIGNED 0
+
+#define INTEN_VBLNK1_MASK 0x00000008U
+#define INTEN_VBLNK1_SHIFT 3
+#define INTEN_VBLNK1_SIGNED 0
+
+#define INTEN_STR1URUN_MASK 0x00000010U
+#define INTEN_STR1URUN_SHIFT 4
+#define INTEN_STR1URUN_SIGNED 0
+
+#define INTEN_STR1ORUN_MASK 0x00000020U
+#define INTEN_STR1ORUN_SHIFT 5
+#define INTEN_STR1ORUN_SIGNED 0
+
+#define INTEN_DISPURUN_MASK 0x00000040U
+#define INTEN_DISPURUN_SHIFT 6
+#define INTEN_DISPURUN_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_INTCLEAR
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR 0x0050
+#define INTCLR_HBLNK0_MASK 0x00000001U
+#define INTCLR_HBLNK0_SHIFT 0
+#define INTCLR_HBLNK0_SIGNED 0
+
+#define INTCLR_HBLNK1_MASK 0x00000002U
+#define INTCLR_HBLNK1_SHIFT 1
+#define INTCLR_HBLNK1_SIGNED 0
+
+#define INTCLR_VBLNK0_MASK 0x00000004U
+#define INTCLR_VBLNK0_SHIFT 2
+#define INTCLR_VBLNK0_SIGNED 0
+
+#define INTCLR_VBLNK1_MASK 0x00000008U
+#define INTCLR_VBLNK1_SHIFT 3
+#define INTCLR_VBLNK1_SIGNED 0
+
+#define INTCLR_STR1URUN_MASK 0x00000010U
+#define INTCLR_STR1URUN_SHIFT 4
+#define INTCLR_STR1URUN_SIGNED 0
+
+#define INTCLR_STR1ORUN_MASK 0x00000020U
+#define INTCLR_STR1ORUN_SHIFT 5
+#define INTCLR_STR1ORUN_SIGNED 0
+
+#define INTCLR_DISPURUN_MASK 0x00000040U
+#define INTCLR_DISPURUN_SHIFT 6
+#define INTCLR_DISPURUN_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_INTCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL 0x0054
+#define HBLNK_LINENO_MASK 0x00000FFFU
+#define HBLNK_LINENO_SHIFT 0
+#define HBLNK_LINENO_SIGNED 0
+
+#define HBLNK_LINE_MASK 0x00010000U
+#define HBLNK_LINE_SHIFT 16
+#define HBLNK_LINE_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_SIGNAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT 0x0058
+#define SIGNATURE_MASK 0xFFFFFFFFU
+#define SIGNATURE_SHIFT 0
+#define SIGNATURE_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_LINESTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT 0x005C
+#define LINENO_MASK 0x00000FFFU
+#define LINENO_SHIFT 0
+#define LINENO_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_DBGCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL 0x0060
+#define DBG_ENAB_MASK 0x00000001U
+#define DBG_ENAB_SHIFT 0
+#define DBG_ENAB_SIGNED 0
+
+#define DBG_READ_MASK 0x00000002U
+#define DBG_READ_SHIFT 1
+#define DBG_READ_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_DBGDATA
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA 0x0064
+#define DBG_DATA_MASK 0x00FFFFFFU
+#define DBG_DATA_SHIFT 0
+#define DBG_DATA_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_DBGSIDE
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE 0x0068
+#define DBG_SIDE_MASK 0x00000007U
+#define DBG_SIDE_SHIFT 0
+#define DBG_SIDE_SIGNED 0
+
+#define DBG_VAL_MASK 0x00000008U
+#define DBG_VAL_SHIFT 3
+#define DBG_VAL_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_REGLD_STAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070
+#define REGLD_ADDROUT_MASK 0x00FFFFFFU
+#define REGLD_ADDROUT_SHIFT 0
+#define REGLD_ADDROUT_SIGNED 0
+
+#define REGLD_ADDREN_MASK 0x80000000U
+#define REGLD_ADDREN_SHIFT 31
+#define REGLD_ADDREN_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_REGLD_CTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074
+#define REGLD_ADDRIN_MASK 0x00FFFFFFU
+#define REGLD_ADDRIN_SHIFT 0
+#define REGLD_ADDRIN_SIGNED 0
+
+#define REGLD_VAL_MASK 0x01000000U
+#define REGLD_VAL_SHIFT 24
+#define REGLD_VAL_SIGNED 0
+
+#define REGLD_ADDRLEN_MASK 0xFE000000U
+#define REGLD_ADDRLEN_SHIFT 25
+#define REGLD_ADDRLEN_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_CORE_ID
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID 0x0078
+#define CONFIG_ID_MASK 0x0000FFFFU
+#define CONFIG_ID_SHIFT 0
+#define CONFIG_ID_SIGNED 0
+
+#define CORE_ID_MASK 0x00FF0000U
+#define CORE_ID_SHIFT 16
+#define CORE_ID_SIGNED 0
+
+#define GROUP_ID_MASK 0xFF000000U
+#define GROUP_ID_SHIFT 24
+#define GROUP_ID_SIGNED 0
+
+/*
+ Register PVR_TCF_RGBPDP_CORE_REV
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV 0x007C
+#define MAINT_REV_MASK 0x000000FFU
+#define MAINT_REV_SHIFT 0
+#define MAINT_REV_SIGNED 0
+
+#define MINOR_REV_MASK 0x0000FF00U
+#define MINOR_REV_SHIFT 8
+#define MINOR_REV_SIGNED 0
+
+#define MAJOR_REV_MASK 0x00FF0000U
+#define MAJOR_REV_SHIFT 16
+#define MAJOR_REV_SIGNED 0
+
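+/*
+ * Illustrative note (not part of the generated definitions): each field in
+ * this file is decoded from the raw 32-bit register value using its _MASK
+ * and _SHIFT pair. For example, assuming "core_rev" holds the value read
+ * from the CORE_REV register above:
+ *
+ *	u32 maint_rev = (core_rev & MAINT_REV_MASK) >> MAINT_REV_SHIFT;
+ *	u32 minor_rev = (core_rev & MINOR_REV_MASK) >> MINOR_REV_SHIFT;
+ *	u32 major_rev = (core_rev & MAJOR_REV_MASK) >> MAJOR_REV_SHIFT;
+ */
+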
+#endif /* !defined(_TCF_RGBPDP_REGS_H_) */
+
+/*****************************************************************************
+ End of file (tcf_rgbpdp_regs.h)
+*****************************************************************************/
--- /dev/null
+#define RGX_FW_HEAP_SHIFT 25
+#define RGX_FW_FILENAME "rgx.fw.signed"
+#define LINUX
+#define PVR_BUILD_DIR "rk3368_android"
+#define PVR_BUILD_TYPE "release"
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define SUPPORT_RGX 1
+#define DMABUF_IMPORT_PHYSHEAP_ID 0
+#define RELEASE
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_5.9.1.46.h"
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_5.V.1.46.h"
+#define SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION
+#define SUPPORT_DBGDRV_EVENT_OBJECTS
+#define PDUMP_STREAMBUF_MAX_SIZE_MB 10
+#define PVRSRV_NEED_PVR_STACKTRACE
+//#define SUPPORT_GPUTRACE_EVENTS
+#define GPUVIRT_VALIDATION_NUM_OS 8
+#define PVRSRV_GPUVIRT_NUM_OSID 2
+#define SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB
+#define PVR_DVFS
+#define SUPPORT_LINUX_X86_WRITECOMBINE
+#define SUPPORT_LINUX_X86_PAT
+#define PVR_LINUX_USING_WORKQUEUES
+#define PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE
+#define PVR_LINUX_TIMERS_USING_WORKQUEUES
+#define PVR_LDM_PLATFORM_PRE_REGISTERED
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define SUPPORT_MMU_PENDING_FAULT_PROTECTION
+#define PVR_DUMMY_PAGE_INIT_VALUE 0x00
+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE
+#define SUPPORT_PERCONTEXT_FREELIST
+#define HWR_DEFAULT_ENABLED
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_HTBUFFERSIZE 0x1000
+#define PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE 0x4000
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB RGXFW_HWPERF_L1_SIZE_DEFAULT
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB HWPERF_HOST_TL_STREAM_SIZE_DEFAULT
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_USEMETAT1 RGX_META_T1_OFF
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 0
+#define PVRSRV_APPHINT_CLEANUPTHREADWEIGHT 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT 0
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP 0
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPLATEST
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_TIMECORRCLOCK 2
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_APPHINT_DUSTREQUESTINJECT IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_ENABLE_PROCESS_STATS
+/* Calculate high watermarks of all the client CCBs and print a warning if
+ * the watermarks touch a certain threshold value (90% by default) of the
+ * cCCB allocation size.
+ */
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO 1
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
+#define PVRSRV_ENABLE_MEMTRACK_STATS_FILE
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 32768
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 1048576
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
+#define SUPPORT_KERNEL_SRVINIT
+#define SUPPORT_NATIVE_FENCE_SYNC
+#define PVR_DRM_NAME "pvr"
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16
+#define ANDROID
+#define SUPPORT_ION
+#define PVR_ANDROID_ION_HEADER "../drivers/staging/android/ion/ion.h"
+#define PVR_ANDROID_ION_PRIV_HEADER "../drivers/staging/android/ion/ion_priv.h"
+#define PVR_ANDROID_ION_USE_SG_LENGTH
+#define PVR_ANDROID_SYNC_HEADER "../drivers/staging/android/sync.h"
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File debugfs_dma_buf.c
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "debugfs_dma_buf.h"
+
+#if defined(DEBUGFS_DMA_BUF) && defined(CONFIG_DEBUG_FS)
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include "kernel_compatibility.h"
+
+static struct dentry *g_debugfs_dentry;
+static struct dma_buf *g_dma_buf;
+
+static ssize_t read_file_dma_buf(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ size_t istart = *ppos / PAGE_SIZE, iend, i = istart;
+ ssize_t wb = 0, res = 0;
+ struct dma_buf *dma_buf = g_dma_buf;
+ int err;
+
+ if (!dma_buf)
+ goto err_out;
+
+ /* Inc the ref count for the time we use it in this function. */
+ get_dma_buf(dma_buf);
+
+ /* End of buffer? */
+ if (*ppos >= dma_buf->size)
+ goto err_put;
+
+ /* Calculate the number of pages we need to process based on the
+ * remaining dma buffer size or the available um buffer size. */
+ iend = istart + min((size_t)(dma_buf->size - *ppos), count) / PAGE_SIZE;
+
+ res = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
+ if (res)
+ goto err_put;
+
+ /* dma_buf_kmap only allows mapping one page, so we have to loop until
+ * the um buffer is full. */
+ while (i < iend) {
+		loff_t dummy = 0; /* Ignored; *ppos is advanced manually */
+ void *map = dma_buf_kmap(dma_buf, i);
+
+ if (!map) {
+ res = -EFAULT;
+ goto err_access;
+ }
+ /* Read PAGE_SIZE or the remaining buffer size worth of
+ * data. Whichever is smaller. */
+ res = simple_read_from_buffer(&user_buf[wb], count - wb,
+ &dummy, map,
+ min((size_t)PAGE_SIZE,
+ (size_t)(dma_buf->size - *ppos)));
+ dma_buf_kunmap(dma_buf, i, map);
+ if (res < 0)
+ goto err_access;
+ wb += res;
+ *ppos += res;
+ ++i;
+ }
+ res = wb;
+
+err_access:
+ do {
+ err = dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+err_put:
+ dma_buf_put(dma_buf);
+err_out:
+ return res;
+}
+
+static const struct file_operations fops_dma_buf = {
+ .open = simple_open,
+ .read = read_file_dma_buf,
+ .llseek = default_llseek,
+};
+
+int debugfs_dma_buf_init(const char *name)
+{
+ int err = 0;
+
+ g_debugfs_dentry = debugfs_create_file(name, S_IRUSR, NULL,
+ NULL, &fops_dma_buf);
+ if (IS_ERR(g_debugfs_dentry)) {
+ err = PTR_ERR(g_debugfs_dentry);
+ g_debugfs_dentry = NULL;
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
+
+void debugfs_dma_buf_deinit(void)
+{
+ debugfs_remove(g_debugfs_dentry);
+}
+
+void debugfs_dma_buf_set(struct dma_buf *dma_buf)
+{
+ struct dma_buf *old_dma_buf = g_dma_buf;
+
+ if (dma_buf)
+ get_dma_buf(dma_buf);
+
+ g_dma_buf = dma_buf;
+
+ if (old_dma_buf)
+ dma_buf_put(old_dma_buf);
+}
+
+#else /* defined(DEBUGFS_DMA_BUF) && defined(CONFIG_DEBUG_FS) */
+
+int debugfs_dma_buf_init(const char *name)
+{
+ return 0;
+}
+
+void debugfs_dma_buf_deinit(void)
+{
+}
+
+void debugfs_dma_buf_set(struct dma_buf *dma_buf)
+{
+}
+
+#endif /* defined(DEBUGFS_DMA_BUF) && defined(CONFIG_DEBUG_FS) */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File debugfs_dma_buf.h
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEBUGFS_DMA_BUF_H
+#define _DEBUGFS_DMA_BUF_H
+
+/* This creates a debugfs file for reading out the current content of the dma
+ * buffer provided by the set function. Note that the dump may show tearing if
+ * the buffer content changes while it is being read. A minimal usage sketch
+ * follows the declarations below. */
+
+/* Uncomment the following line to enable */
+/*#define DEBUGFS_DMA_BUF 1*/
+
+#include <linux/dma-buf.h>
+
+int debugfs_dma_buf_init(const char *name);
+void debugfs_dma_buf_deinit(void);
+void debugfs_dma_buf_set(struct dma_buf *dma_buf);
+
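+/*
+ * Minimal usage sketch (illustrative only; assumes DEBUGFS_DMA_BUF has been
+ * enabled above and that "pvr_dump" is just an example debugfs entry name):
+ *
+ *	err = debugfs_dma_buf_init("pvr_dump");	// e.g. at driver init
+ *	...
+ *	debugfs_dma_buf_set(dma_buf);		// publish the buffer to dump
+ *	...
+ *	debugfs_dma_buf_set(NULL);		// drop the held reference
+ *	debugfs_dma_buf_deinit();		// e.g. at driver deinit
+ */
+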
+#endif /* _DEBUGFS_DMA_BUF_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title 3D types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _POWERVR_BUFFER_ATTRIBS_H_
+#define _POWERVR_BUFFER_ATTRIBS_H_
+
+/**
+ * Memory layouts
+ * Defines how pixels are laid out within a surface.
+ */
+typedef enum
+{
+ IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */
+ IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled, classic style */
+ IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */
+ IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. */
+ IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */
+} IMG_MEMLAYOUT;
+
+/**
+ * Rotation types
+ */
+typedef enum
+{
+ IMG_ROTATION_0DEG = 0,
+ IMG_ROTATION_90DEG = 1,
+ IMG_ROTATION_180DEG = 2,
+ IMG_ROTATION_270DEG = 3,
+ IMG_ROTATION_FLIP_Y = 4,
+
+ IMG_ROTATION_BAD = 255,
+} IMG_ROTATION;
+
+/**
+ * Alpha types.
+ */
+typedef enum
+{
+ IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000, /**< Colourspace Format: Unknown */
+ IMG_COLOURSPACE_FORMAT_LINEAR = 0x00010000, /**< Colourspace Format: Linear */
+ IMG_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000, /**< Colourspace Format: Non-Linear */
+ IMG_COLOURSPACE_FORMAT_MASK = 0x000F0000, /**< Colourspace Format Mask */
+} IMG_COLOURSPACE_FORMAT;
+
+/**
+ * Types of framebuffer compression
+ */
+typedef enum
+{
+ IMG_FB_COMPRESSION_NONE,
+ IMG_FB_COMPRESSION_DIRECT_8x8,
+ IMG_FB_COMPRESSION_DIRECT_16x4,
+ IMG_FB_COMPRESSION_DIRECT_32x2,
+ IMG_FB_COMPRESSION_INDIRECT_8x8,
+ IMG_FB_COMPRESSION_INDIRECT_16x4,
+ IMG_FB_COMPRESSION_INDIRECT_4TILE_8x8,
+ IMG_FB_COMPRESSION_INDIRECT_4TILE_16x4
+} IMG_FB_COMPRESSION;
+
+
+#endif /* _POWERVR_BUFFER_ATTRIBS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Public types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _POWERVR_TYPES_H_
+#define _POWERVR_TYPES_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+ #include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+ #include <linux/types.h>
+#else
+ #include <stdint.h>
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct _IMG_DEV_VIRTADDR
+{
+ uint64_t uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _POWERVR_TYPES_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation interface header
+@Description Defines synchronisation structures that are visible internally
+ and externally
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_EXTERNAL_
+#define _SYNC_EXTERNAL_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <powervr/mem_types.h>
+
+/*!
+ * Maximum byte length for a sync prim name
+ */
+#define SYNC_MAX_CLASS_NAME_LEN 32
+
+/*!
+ * Number of sync primitives in operations
+ */
+#define PVRSRV_MAX_SYNC_PRIMS 32
+
+typedef void* PVRSRV_CLIENT_SYNC_PRIM_HANDLE;
+typedef void* SYNC_BRIDGE_HANDLE;
+typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT;
+typedef struct _SYNC_OP_COOKIE_ *PSYNC_OP_COOKIE;
+
+/*!
+ * Client sync prim definition holding a CPU accessible address
+ *
+ * Structure: #PVRSRV_CLIENT_SYNC_PRIM
+ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM
+{
+ volatile uint32_t *pui32LinAddr; /*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+/*!
+ * Bundled information for a sync prim operation
+ *
+ * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
+ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP
+{
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1 << 0)
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1 << 1)
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1<<2))
+ uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
+ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */
+ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+ uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
+
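+/*
+ * Illustrative example only (the "sync", "fence_value" and "update_value"
+ * names are placeholders): an operation that both checks a fence value and
+ * writes an update value through the same primitive could be filled in as
+ * follows:
+ *
+ *	PVRSRV_CLIENT_SYNC_PRIM_OP sOp = {0};
+ *
+ *	sOp.ui32Flags = PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK |
+ *			PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ *	sOp.psSync = sync;
+ *	sOp.ui32FenceValue = fence_value;
+ *	sOp.ui32UpdateValue = update_value;
+ */
+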
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _SYNC_EXTERNAL_ */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drm.h>
+#include <drm/drmP.h> /* include before drm_crtc.h for kernels older than 3.9 */
+#include <drm/drm_crtc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+#include "pvrversion.h"
+#include "services_kernel_client.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+#define DRIVER_RENDER 0
+#define DRM_RENDER_ALLOW 0
+#endif
+
+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME
+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM"
+#define PVR_DRM_DRIVER_DATE "20110701"
+
+
+static int pvr_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("device %p\n", dev);
+
+ return PVRSRVCommonDeviceSuspend(ddev->dev_private);
+}
+
+static int pvr_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("device %p\n", dev);
+
+ return PVRSRVCommonDeviceResume(ddev->dev_private);
+}
+
+const struct dev_pm_ops pvr_pm_ops = {
+ .suspend = pvr_pm_suspend,
+ .resume = pvr_pm_resume,
+};
+
+
+static int pvr_drm_load(struct drm_device *ddev, unsigned long flags)
+{
+ struct _PVRSRV_DEVICE_NODE_ *dev_node;
+ enum PVRSRV_ERROR srv_err;
+ int err;
+
+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+ /*
+ * The equivalent is done for PCI modesetting drivers by
+ * drm_get_pci_dev()
+ */
+ if (ddev->platformdev)
+ platform_set_drvdata(ddev->platformdev, ddev);
+
+ srv_err = PVRSRVDeviceCreate(ddev->dev, &dev_node);
+ if (srv_err != PVRSRV_OK) {
+ DRM_ERROR("failed to create device node for device %p (%s)\n",
+ ddev->dev, PVRSRVGetErrorStringKM(srv_err));
+ if (srv_err == PVRSRV_ERROR_PROBE_DEFER)
+ err = -EPROBE_DEFER;
+ else
+ err = -ENODEV;
+ goto err_exit;
+ }
+
+ err = PVRSRVCommonDeviceInit(dev_node);
+ if (err) {
+ DRM_ERROR("device %p initialisation failed (err=%d)\n",
+ ddev->dev, err);
+ goto err_device_destroy;
+ }
+
+ drm_mode_config_init(ddev);
+ ddev->dev_private = dev_node;
+
+ return 0;
+
+err_device_destroy:
+ PVRSRVDeviceDestroy(dev_node);
+err_exit:
+ return err;
+}
+
+static int pvr_drm_unload(struct drm_device *ddev)
+{
+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+ PVRSRVCommonDeviceDeinit(ddev->dev_private);
+
+ PVRSRVDeviceDestroy(ddev->dev_private);
+ ddev->dev_private = NULL;
+
+ return 0;
+}
+
+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile)
+{
+ int err;
+
+ if (!try_module_get(THIS_MODULE)) {
+ DRM_ERROR("failed to get module reference\n");
+ return -ENOENT;
+ }
+
+ err = PVRSRVCommonDeviceOpen(ddev->dev_private, dfile);
+ if (err)
+ module_put(THIS_MODULE);
+
+ return err;
+}
+
+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile)
+{
+ PVRSRVCommonDeviceRelease(ddev->dev_private, dfile);
+
+ module_put(THIS_MODULE);
+}
+
+/*
+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set.
+ * If you revise one of the driver-specific ioctls, or add a new one, with
+ * DRM_UNLOCKED set, consider whether the gPVRSRVLock mutex needs to be
+ * taken.
+ */
+static struct drm_ioctl_desc pvr_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED),
+#if defined(PDUMP)
+ DRM_IOCTL_DEF_DRV(PVR_DBGDRV_CMD, dbgdrv_ioctl, DRM_RENDER_ALLOW | DRM_AUTH | DRM_UNLOCKED),
+#endif
+};
+
+#if defined(CONFIG_COMPAT)
+#if defined(PDUMP)
+static drm_ioctl_compat_t *pvr_drm_compat_ioctls[] = {
+ [DRM_PVR_DBGDRV_CMD] = dbgdrv_ioctl_compat,
+};
+#endif
+
+static long pvr_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(file, cmd, arg);
+
+#if defined(PDUMP)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(pvr_drm_compat_ioctls)) {
+ drm_ioctl_compat_t *pfnBridge;
+
+ pfnBridge = pvr_drm_compat_ioctls[nr - DRM_COMMAND_BASE];
+ if (pfnBridge)
+ return pfnBridge(file, cmd, arg);
+ }
+#endif
+
+ return drm_ioctl(file, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations pvr_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ /*
+ * FIXME:
+ * Wrap this in a function that checks enough data has been
+ * supplied with the ioctl (e.g. _IOCDIR(nr) != _IOC_NONE &&
+ * _IOC_SIZE(nr) == size).
+ */
+ .unlocked_ioctl = drm_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = pvr_compat_ioctl,
+#endif
+ .mmap = PVRSRV_MMap,
+ .poll = drm_poll,
+ .read = drm_read,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+ .fasync = drm_fasync,
+#endif
+};
+
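+/*
+ * One possible shape for the wrapper mentioned in the FIXME above (a sketch
+ * only, not wired into pvr_drm_fops): reject ioctls whose encoded size is
+ * obviously inconsistent before handing off to drm_ioctl().
+ *
+ *	static long pvr_checked_ioctl(struct file *file, unsigned int cmd,
+ *				      unsigned long arg)
+ *	{
+ *		if (_IOC_DIR(cmd) != _IOC_NONE && _IOC_SIZE(cmd) == 0)
+ *			return -EINVAL;
+ *
+ *		return drm_ioctl(file, cmd, arg);
+ *	}
+ */
+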
+const struct drm_driver pvr_drm_generic_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_RENDER,
+
+ .dev_priv_size = 0,
+ .load = pvr_drm_load,
+ .unload = pvr_drm_unload,
+ .open = pvr_drm_open,
+ .postclose = pvr_drm_release,
+
+ .ioctls = pvr_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls),
+ .fops = &pvr_drm_fops,
+
+ .name = PVR_DRM_DRIVER_NAME,
+ .desc = PVR_DRM_DRIVER_DESC,
+ .date = PVR_DRM_DRIVER_DATE,
+ .major = PVRVERSION_MAJ,
+ .minor = PVRVERSION_MIN,
+ .patchlevel = PVRVERSION_BUILD,
+};
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRV_H__)
+#define __PVR_DRV_H__
+
+#include <drm/drmP.h>
+#include <linux/pm.h>
+
+struct file;
+struct vm_area_struct;
+
+extern const struct dev_pm_ops pvr_pm_ops;
+extern const struct drm_driver pvr_drm_generic_driver;
+
+#if defined(PDUMP)
+int dbgdrv_init(void);
+void dbgdrv_cleanup(void);
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *file);
+int dbgdrv_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
+ struct drm_file *file);
+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__PVR_DRV_H__) */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM platform driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drv.h"
+#include "pvrmodule.h"
+#include "sysinfo.h"
+#include "pvrversion.h" //add by zxl
+
+#if defined(CONFIG_OF)
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
+struct platform_device *gpsPVRLDMDev;
+
+static struct drm_driver pvr_drm_platform_driver;
+
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+/*
+ * This is an arbitrary value. If it's changed then the 'num_devices' module
+ * parameter description should also be updated to match.
+ */
+#define MAX_DEVICES 16
+
+static unsigned int pvr_num_devices = 1;
+static struct platform_device **pvr_devices;
+
+#if defined(NO_HARDWARE)
+static int pvr_num_devices_set(const char *val,
+ const struct kernel_param *param)
+{
+ int err;
+
+ err = param_set_uint(val, param);
+ if (err)
+ return err;
+
+ if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct kernel_param_ops pvr_num_devices_ops = {
+ .set = pvr_num_devices_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices,
+ S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(num_devices,
+ "Number of platform devices to register (default: 1 - max: 16)");
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+static int pvr_devices_register(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+ struct platform_device_info pvr_dev_info = {
+ .name = SYS_RGX_DEV_NAME,
+ .id = -2,
+#if defined(NO_HARDWARE)
+		/* Not all cores support 40-bit physical addresses, but this
+		 * works unless an address above 32 bits is returned on those
+		 * cores. This will be handled properly in the future.
+ */
+ .dma_mask = DMA_BIT_MASK(40),
+#else
+ .dma_mask = DMA_BIT_MASK(32),
+#endif
+ };
+ unsigned int i;
+
+ BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES);
+
+ pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices),
+ GFP_KERNEL);
+ if (!pvr_devices)
+ return -ENOMEM;
+
+ for (i = 0; i < pvr_num_devices; i++) {
+ pvr_devices[i] = platform_device_register_full(&pvr_dev_info);
+ if (IS_ERR(pvr_devices[i])) {
+ DRM_ERROR("unable to register device %u (err=%ld)\n",
+ i, PTR_ERR(pvr_devices[i]));
+ pvr_devices[i] = NULL;
+ return -ENODEV;
+ }
+ }
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+ return 0;
+}
+
+static void pvr_devices_unregister(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+ unsigned int i;
+
+ BUG_ON(!pvr_devices);
+
+ for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++)
+ platform_device_unregister(pvr_devices[i]);
+
+ kfree(pvr_devices);
+ pvr_devices = NULL;
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+}
+
+static int pvr_probe(struct platform_device *pdev)
+{
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	/* zxl: print the GPU version at boot time */
+	printk("PVR_K: sys.gpvr.version=%s\n", RKVERSION);
+ gpsPVRLDMDev = pdev;
+
+ return drm_platform_init(&pvr_drm_platform_driver, pdev);
+}
+
+static int pvr_remove(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+ drm_put_dev(ddev);
+
+ return 0;
+}
+
+static void pvr_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+ PVRSRVCommonDeviceShutdown(ddev->dev_private);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+static const struct of_device_id pvr_of_ids[] = {
+#if defined(SYS_RGX_OF_COMPATIBLE)
+ { .compatible = SYS_RGX_OF_COMPATIBLE, },
+#endif
+ { .compatible = "arm,rogue-G6110", },
+ { .compatible = "arm,rk3368-gpu", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pvr_of_ids);
+#endif
+
+static struct platform_device_id pvr_platform_ids[] = {
+#if defined(SYS_RGX_DEV_NAME)
+ { SYS_RGX_DEV_NAME, 0 },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pvr_platform_ids);
+
+static struct platform_driver pvr_platform_driver = {
+ .driver = {
+ .name = DRVNAME,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ .of_match_table = of_match_ptr(pvr_of_ids),
+#endif
+ .pm = &pvr_pm_ops,
+ },
+ .id_table = pvr_platform_ids,
+ .probe = pvr_probe,
+ .remove = pvr_remove,
+ .shutdown = pvr_shutdown,
+};
+
+static int __init pvr_init(void)
+{
+ int err;
+ int i = 0;
+ struct device_node *np;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for_each_compatible_node(np, NULL, "arm,rogue-G6110") {
+ i++;
+ }
+	if (i == 0) {
+		printk("PVR_K: no Rogue GPU node found in the device tree\n");
+		return -ENODEV;
+ }
+
+ pvr_drm_platform_driver = pvr_drm_generic_driver;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+ pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
+#endif
+
+ err = PVRSRVCommonDriverInit();
+ if (err)
+ return err;
+
+ err = platform_driver_register(&pvr_platform_driver);
+ if (err)
+ return err;
+
+ return pvr_devices_register();
+}
+
+static void __exit pvr_exit(void)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ pvr_devices_unregister();
+ platform_driver_unregister(&pvr_platform_driver);
+ PVRSRVCommonDriverDeinit();
+
+ DRM_DEBUG_DRIVER("done\n");
+}
+
+late_initcall(pvr_init);
+module_exit(pvr_exit);
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File pvr_sync.c
+@Title Kernel driver for Android's sync mechanism
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_sync.h"
+#include "pvr_fd_sync_kernel.h"
+#include "services_kernel_client.h"
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/syscalls.h>
+#include <linux/miscdevice.h>
+#include <linux/anon_inodes.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#include <linux/sync.h>
+#ifndef CONFIG_SW_SYNC_USER
+#include <linux/sw_sync.h>
+#endif
+#else
+#include <../drivers/staging/android/sync.h>
+#ifndef CONFIG_SW_SYNC_USER
+#include <../drivers/staging/android/sw_sync.h>
+#endif
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+ return pt->parent;
+}
+
+static inline int sync_pt_get_status(struct sync_pt *pt)
+{
+ return pt->status;
+}
+
+#define for_each_sync_pt(s, f, c) \
+ list_for_each_entry((s), &(f)->pt_list_head, pt_list)
+
+#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+static inline int sync_pt_get_status(struct sync_pt *pt)
+{
+ /* No error state for raw dma-buf fences */
+ return fence_is_signaled(&pt->base) ? 1 : 0;
+}
+
+#define for_each_sync_pt(s, f, c) \
+ for ((c) = 0, (s) = (f)->num_fences == 0 ? \
+ NULL : (struct sync_pt *)(f)->cbs[0].sync_pt; \
+ (c) < (f)->num_fences; \
+ (c)++, (s) = (c) < (f)->num_fences ? \
+ (struct sync_pt *)(f)->cbs[c].sync_pt : NULL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+/* #define DEBUG_OUTPUT 1 */
+
+#ifdef DEBUG_OUTPUT
+#define DPF(fmt, ...) pr_err("pvr_sync: " fmt "\n", __VA_ARGS__)
+#else
+#define DPF(fmt, ...) do {} while (0)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+ do { \
+ if (pfnDumpDebugPrintf) { \
+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, __VA_ARGS__); \
+ } else { \
+ pr_info("pvr_sync: " fmt, __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define SYNC_MAX_POOL_SIZE 10
+
+enum {
+ SYNC_TL_TYPE = 0,
+ SYNC_PT_FENCE_TYPE = 1,
+ SYNC_PT_CLEANUP_TYPE = 2,
+ SYNC_PT_FOREIGN_FENCE_TYPE = 3,
+ SYNC_PT_FOREIGN_CLEANUP_TYPE = 4,
+};
+
+struct pvr_sync_append_data {
+ u32 nr_updates;
+ struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addresses;
+ u32 *update_values;
+ u32 nr_checks;
+ struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addresses;
+ u32 *check_values;
+
+ /* The cleanup list is needed for rollback (as that's the only op
+ * taken).
+ */
+ u32 nr_cleanup_syncs;
+ struct pvr_sync_native_sync_prim **cleanup_syncs;
+
+ /* An FD is reserved in append_fences, but it is not associated with
+ * the update fence until pvr_sync_get_update_fd().
+ */
+ int update_fence_fd;
+
+ /* Keep the sync points around for fput and if rollback is needed */
+ struct sync_fence *update_fence;
+ struct pvr_sync_native_sync_prim *update_sync;
+ struct pvr_sync_native_sync_prim *update_timeline_sync;
+ struct sync_fence *check_fence;
+};
+
+/* Services client sync prim wrapper. This is used to hold debug information
+ * and make it possible to cache unused syncs.
+ */
+struct pvr_sync_native_sync_prim {
+ /* List for the sync pool support. */
+ struct list_head list;
+
+ /* Base services sync prim structure */
+ struct PVRSRV_CLIENT_SYNC_PRIM *client_sync;
+
+ /* The next queued value which should be used */
+ u32 next_value;
+
+ /* Every sync data will get some unique id */
+ u32 id;
+
+ /* FWAddr used by the client sync */
+ u32 vaddr;
+
+ /* The type this sync is used for in our driver. Used in
+ * pvr_sync_debug_request().
+ */
+ u8 type;
+
+ /* A debug class name also printed in pvr_sync_debug_request(). */
+ char class[32];
+
+ /* List for the cleanup syncs attached to a sync_pt */
+ struct list_head cleanup_list;
+};
+
+/* This is the actual timeline metadata. We might keep this around after the
+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
+ */
+struct pvr_sync_timeline {
+ /* Back reference to the sync_timeline. Not always valid */
+ struct sync_timeline *obj;
+
+ /* Global timeline list support */
+ struct list_head list;
+
+ /* Timeline sync */
+ struct pvr_sync_kernel_pair *kernel;
+
+ /* Reference count for this object */
+ struct kref kref;
+
+ /* Used only by pvr_sync_update_all_timelines(). False if the timeline
+ * has been detected as racing with pvr_sync_destroy_timeline().
+ */
+ bool valid;
+};
+
+/* This is the IMG extension of a sync_timeline */
+struct pvr_sync_timeline_wrapper {
+ /* Original timeline struct. Needs to come first. */
+ struct sync_timeline obj;
+
+ /* Pointer to extra timeline data. Separated life-cycle. */
+ struct pvr_sync_timeline *timeline;
+};
+
+struct pvr_sync_kernel_pair {
+ /* Binary sync point representing the android native sync in hw. */
+ struct pvr_sync_native_sync_prim *fence_sync;
+
+ /* Cleanup sync list. If the base sync prim is used only for "checking"
+ * within a GL stream, there is no way of knowing when that check has
+ * happened. So each check appends another sync prim, used purely for an
+ * update at the end of the command; once every sync in this cleanup list
+ * is complete, no outstanding renders can still be checking the base
+ * sync, and it can safely be freed.
+ */
+ struct list_head cleanup_sync_list;
+ /* A temporary pointer used to track the 'new' cleanup_sync added to
+ * cleanup_sync_list within pvr_sync_append_fences()
+ */
+ struct pvr_sync_native_sync_prim *current_cleanup_sync;
+
+ /* Sync points can go away when there are deferred hardware operations
+ * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
+ * the hardware is finished, so we add it to a defer list which is
+ * processed periodically ("defer-free").
+ *
+ * Note that the defer-free list is global, not per-timeline.
+ */
+ struct list_head list;
+};
+
+struct pvr_sync_data {
+ /* Every sync point has a services sync object. This object is used
+ * by the hardware to enforce ordering -- it is attached as a source
+ * dependency to various commands.
+ */
+ struct pvr_sync_kernel_pair *kernel;
+
+ /* The timeline update value for this sync point. */
+ u32 timeline_update_value;
+
+ /* This refcount is incremented at create and dup time, and decremented
+ * at free time. It ensures the object doesn't start the defer-free
+ * process until it is no longer referenced.
+ */
+ struct kref kref;
+};
+
+/* This is the IMG extension of a sync_pt */
+struct pvr_sync_pt {
+ /* Original sync_pt structure. Needs to come first. */
+ struct sync_pt pt;
+
+ /* Private shared data */
+ struct pvr_sync_data *sync_data;
+};
+
+/* This is the IMG extension of a sync_fence */
+struct pvr_sync_fence {
+ /* Original sync_fence structure. Needs to come first. */
+ struct sync_fence *fence;
+
+ /* To ensure callbacks are always received for fences / sync_pts, even
+ * after the fence has been 'put' (freed), we must take a reference to
+ * the fence. We still need to 'put' the fence ourselves, but this might
+ * happen in irq context, where fput() is not allowed (in kernels <3.6).
+ * We must add the fence to a list which is processed in WQ context.
+ */
+ struct list_head list;
+};
+
+/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
+ * sync prim. This is modelled as a software operation. The foreign driver
+ * completes the operation by calling a callback we registered with it.
+ */
+struct pvr_sync_fence_waiter {
+ /* Base sync driver waiter structure */
+ struct sync_fence_waiter waiter;
+
+ /* "Shadow" sync prim backing the foreign driver's sync_pt */
+ struct pvr_sync_kernel_pair *kernel;
+
+ /* Optimizes lookup of fence for defer-put operation */
+ struct pvr_sync_fence *sync_fence;
+};
+
+/* Global data for the sync driver */
+static struct {
+ /* Complete notify handle */
+ void *command_complete_handle;
+
+ /* Defer-free workqueue. Syncs may still be in use by the HW when they
+ * are freed, so we have to keep them around until the HW is done with
+ * them. This workqueue iterates over the list of freed syncs, checks
+ * whether they are still in use, and frees the sync device memory once
+ * they are not.
+ */
+ struct workqueue_struct *defer_free_wq;
+ struct work_struct defer_free_work;
+
+ /* check_status workqueue: when a foreign point is completed, a SW
+ * operation marks the sync as completed to allow the operations to
+ * continue. This completion may require the hardware to be notified,
+ * which may be expensive or require taking locks, so we push that work
+ * to a workqueue.
+ */
+ struct workqueue_struct *check_status_wq;
+ struct work_struct check_status_work;
+
+ /* Context used to create client sync prims. */
+ struct SYNC_PRIM_CONTEXT *sync_prim_context;
+
+ /* Debug notify handle */
+ void *debug_notify_handle;
+
+ /* Unique id counter for the sync prims */
+ atomic_t sync_id;
+
+ /* The global event object (used to wait between checks for
+ * deferred-free sync status).
+ */
+ void *event_object_handle;
+} pvr_sync_data;
+
+/* List of timelines created by this driver */
+static LIST_HEAD(timeline_list);
+static DEFINE_MUTEX(timeline_list_mutex);
+
+/* Sync pool support */
+static LIST_HEAD(sync_pool_free_list);
+static LIST_HEAD(sync_pool_active_list);
+static DEFINE_MUTEX(sync_pool_mutex);
+static s32 sync_pool_size;
+static u32 sync_pool_created;
+static u32 sync_pool_reused;
+
+/* The "defer-free" object list. Driver global. */
+static LIST_HEAD(sync_prim_free_list);
+static DEFINE_SPINLOCK(sync_prim_free_list_spinlock);
+
+/* The "defer-put" object list. Driver global. */
+static LIST_HEAD(sync_fence_put_list);
+static DEFINE_SPINLOCK(sync_fence_put_list_spinlock);
+
+static void pvr_sync_update_all_timelines(void *command_complete_handle);
+
+static inline void set_sync_value(struct pvr_sync_native_sync_prim *sync,
+ u32 value)
+{
+ *(sync->client_sync->pui32LinAddr) = value;
+}
+
+static inline u32 get_sync_value(struct pvr_sync_native_sync_prim *sync)
+{
+ return *(sync->client_sync->pui32LinAddr);
+}
+
+static inline void complete_sync(struct pvr_sync_native_sync_prim *sync)
+{
+ *(sync->client_sync->pui32LinAddr) = sync->next_value;
+}
+
+static inline int is_sync_met(struct pvr_sync_native_sync_prim *sync)
+{
+ return *(sync->client_sync->pui32LinAddr) == sync->next_value;
+}
+
+static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj)
+{
+ return ((struct pvr_sync_timeline_wrapper *)obj)->timeline;
+}
+
+static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt)
+{
+ return get_timeline(sync_pt_parent(pt));
+}
+
+static inline int
+pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel)
+{
+ /* Idle syncs are always signaled */
+ if (!kernel)
+ return 1;
+
+ return is_sync_met(kernel->fence_sync);
+}
+
+#ifdef DEBUG_OUTPUT
+
+static char *debug_info_timeline(struct pvr_sync_timeline *timeline)
+{
+ static char info[256];
+
+ snprintf(info, sizeof(info),
+ "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u",
+ timeline->obj ? timeline->obj->name : "?",
+ timeline->kernel->fence_sync->id,
+ timeline->kernel->fence_sync->vaddr,
+ get_sync_value(timeline->kernel->fence_sync),
+ timeline->kernel->fence_sync->next_value);
+
+ return info;
+}
+
+static char *debug_info_sync_pt(struct sync_pt *pt)
+{
+ struct pvr_sync_timeline *timeline = get_timeline_pt(pt);
+ struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt;
+ struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel;
+ static char info[256], info1[256];
+
+ if (kernel) {
+ unsigned int cleanup_count = 0;
+ unsigned int info1_pos = 0;
+ struct list_head *pos;
+
+ info1[0] = 0;
+
+ list_for_each(pos, &kernel->cleanup_sync_list) {
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ list_entry(pos,
+ struct pvr_sync_native_sync_prim,
+ cleanup_list);
+ int string_size = 0;
+
+ string_size = snprintf(info1 + info1_pos,
+ sizeof(info1) - info1_pos,
+ " # cleanup %u: id=%u fw=0x%x curr=%u next=%u",
+ cleanup_count,
+ cleanup_sync->id,
+ cleanup_sync->vaddr,
+ get_sync_value(cleanup_sync),
+ cleanup_sync->next_value);
+ cleanup_count++;
+ info1_pos += string_size;
+ /* Truncate the string and stop if we run out of space
+ * This should stop any underflow of snprintf's 'size'
+ * arg too
+ */
+ if (info1_pos >= sizeof(info1))
+ break;
+ }
+
+ snprintf(info, sizeof(info),
+ "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s",
+ pvr_sync_has_kernel_signaled(kernel),
+ pvr_pt->sync_data->timeline_update_value,
+ atomic_read(&pvr_pt->sync_data->kref.refcount),
+ kernel->fence_sync->id,
+ kernel->fence_sync->vaddr,
+ get_sync_value(kernel->fence_sync),
+ kernel->fence_sync->next_value,
+ info1, debug_info_timeline(timeline));
+ } else {
+ snprintf(info, sizeof(info),
+ "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s",
+ pvr_sync_has_kernel_signaled(kernel),
+ pvr_pt->sync_data->timeline_update_value,
+ atomic_read(&pvr_pt->sync_data->kref.refcount),
+ debug_info_timeline(timeline));
+ }
+
+ return info;
+}
+
+#endif /* DEBUG_OUTPUT */
+
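+/* Take a sync prim from the free pool, or allocate and register a new one
+ * if the pool is empty. The sync is moved to the active list, given a fresh
+ * id, type and class name, and its current/next values are reset to zero.
+ */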
+static enum PVRSRV_ERROR
+sync_pool_get(struct pvr_sync_native_sync_prim **_sync,
+ const char *class_name, u8 type)
+{
+ struct pvr_sync_native_sync_prim *sync;
+ enum PVRSRV_ERROR error = PVRSRV_OK;
+ u32 sync_addr;
+
+ mutex_lock(&sync_pool_mutex);
+
+ if (list_empty(&sync_pool_free_list)) {
+ /* If there is nothing in the pool, create a new sync prim. */
+ sync = kmalloc(sizeof(*sync),
+ GFP_KERNEL);
+ if (!sync) {
+ pr_err("pvr_sync: %s: Failed to allocate sync data\n",
+ __func__);
+ error = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_unlock;
+ }
+
+ error = SyncPrimAlloc(pvr_sync_data.sync_prim_context,
+ &sync->client_sync, class_name);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_free;
+ }
+
+ error = SyncPrimGetFirmwareAddr(sync->client_sync, &sync_addr);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to get FW address (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_sync_prim_free;
+ }
+ sync->vaddr = sync_addr;
+
+ list_add_tail(&sync->list, &sync_pool_active_list);
+ ++sync_pool_created;
+ } else {
+ sync = list_first_entry(&sync_pool_free_list,
+ struct pvr_sync_native_sync_prim, list);
+ list_move_tail(&sync->list, &sync_pool_active_list);
+ --sync_pool_size;
+ ++sync_pool_reused;
+ }
+
+ sync->id = atomic_inc_return(&pvr_sync_data.sync_id);
+ sync->type = type;
+
+ strncpy(sync->class, class_name, sizeof(sync->class));
+ sync->class[sizeof(sync->class) - 1] = '\0';
+ /* It's crucial to reset the sync to zero */
+ set_sync_value(sync, 0);
+ sync->next_value = 0;
+
+ *_sync = sync;
+err_unlock:
+ mutex_unlock(&sync_pool_mutex);
+ return error;
+
+err_sync_prim_free:
+ SyncPrimFree(sync->client_sync);
+
+err_free:
+ kfree(sync);
+ goto err_unlock;
+}
+
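+/* Return a sync prim to the free pool, or free it immediately if the pool
+ * already holds SYNC_MAX_POOL_SIZE entries. Pool entries are poisoned with
+ * 0xffffffff (unused) or 0xdeadbeef (freed) to aid debugging.
+ */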
+static void sync_pool_put(struct pvr_sync_native_sync_prim *sync)
+{
+ mutex_lock(&sync_pool_mutex);
+
+ if (sync_pool_size < SYNC_MAX_POOL_SIZE) {
+ /* Mark it as unused */
+ set_sync_value(sync, 0xffffffff);
+
+ list_move(&sync->list, &sync_pool_free_list);
+ ++sync_pool_size;
+ } else {
+ /* Mark it as invalid */
+ set_sync_value(sync, 0xdeadbeef);
+
+ list_del(&sync->list);
+ SyncPrimFree(sync->client_sync);
+ kfree(sync);
+ }
+
+ mutex_unlock(&sync_pool_mutex);
+}
+
+static void sync_pool_clear(void)
+{
+ struct pvr_sync_native_sync_prim *sync, *n;
+
+ mutex_lock(&sync_pool_mutex);
+
+ list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) {
+ /* Mark it as invalid */
+ set_sync_value(sync, 0xdeadbeef);
+
+ list_del(&sync->list);
+ SyncPrimFree(sync->client_sync);
+ kfree(sync);
+ --sync_pool_size;
+ }
+
+ mutex_unlock(&sync_pool_mutex);
+}
+
+static void pvr_sync_debug_request(void *hDebugRequestHandle,
+ u32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ struct pvr_sync_native_sync_prim *sync;
+
+ static const char *const type_names[] = {
+ "Timeline", "Fence", "Cleanup",
+ "Foreign Fence", "Foreign Cleanup"
+ };
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH) {
+ mutex_lock(&sync_pool_mutex);
+
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "Dumping all pending android native syncs (Pool usage: %d%% - %d %d)",
+ sync_pool_reused ?
+ (10000 /
+ ((sync_pool_created + sync_pool_reused) *
+ 100 / sync_pool_reused)) : 0,
+ sync_pool_created, sync_pool_reused);
+
+ list_for_each_entry(sync, &sync_pool_active_list, list) {
+ if (is_sync_met(sync))
+ continue;
+
+ BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
+ sync->id, sync->vaddr,
+ get_sync_value(sync),
+ sync->next_value,
+ sync->class,
+ type_names[sync->type]);
+ }
+#if 0
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "%s", "Dumping all unused syncs");
+ list_for_each_entry(sync, &sync_pool_free_list, list) {
+ BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
+ sync->id, sync->vaddr,
+ get_sync_value(sync),
+ sync->next_value,
+ sync->class,
+ type_names[sync->type]);
+ }
+#endif
+ mutex_unlock(&sync_pool_mutex);
+ }
+}
+
+static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt)
+{
+ struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt;
+ struct pvr_sync_pt *pvr_pt_b = NULL;
+
+ DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+ pvr_pt_b = (struct pvr_sync_pt *)
+ sync_pt_create(sync_pt_parent(sync_pt),
+ sizeof(*pvr_pt_b));
+ if (!pvr_pt_b) {
+ pr_err("pvr_sync: %s: Failed to dup sync pt\n", __func__);
+ goto err_out;
+ }
+
+ kref_get(&pvr_pt_a->sync_data->kref);
+
+ pvr_pt_b->sync_data = pvr_pt_a->sync_data;
+
+err_out:
+ return (struct sync_pt *)pvr_pt_b;
+}
+
+static int pvr_sync_has_signaled(struct sync_pt *sync_pt)
+{
+ struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+ DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+ return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel);
+}
+
+static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value;
+ u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value;
+
+ DPF("%s: a # %s", __func__, debug_info_sync_pt(a));
+ DPF("%s: b # %s", __func__, debug_info_sync_pt(b));
+
+ if (a1 == b1)
+ return 0;
+
+ /* Take integer wrapping into account */
+ return ((s32)a1 - (s32)b1) < 0 ? -1 : 1;
+}
+
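+/* Wait until the sync prim reaches its next_value, sleeping on the global
+ * event object between checks. On NO_HARDWARE builds the wait is compiled
+ * out.
+ */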
+static void wait_for_sync(struct pvr_sync_native_sync_prim *sync)
+{
+#ifndef NO_HARDWARE
+ void *event_object = NULL;
+ enum PVRSRV_ERROR error = PVRSRV_OK;
+
+ while (sync && !is_sync_met(sync)) {
+ if (!event_object) {
+ error = OSEventObjectOpen(
+ pvr_sync_data.event_object_handle,
+ &event_object);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Error opening event object (%s)\n",
+ __func__,
+ PVRSRVGetErrorStringKM(error));
+ break;
+ }
+ }
+ error = OSEventObjectWait(event_object);
+ if (error != PVRSRV_OK && error != PVRSRV_ERROR_TIMEOUT) {
+ pr_err("pvr_sync: %s: Error waiting on event object (%s)\n",
+ __func__,
+ PVRSRVGetErrorStringKM(error));
+ }
+ }
+
+ if (event_object)
+ OSEventObjectClose(event_object);
+#endif /* NO_HARDWARE */
+}
+
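+/* Add a kernel sync pair to the global defer-free list and kick the
+ * defer-free workqueue, which frees the syncs once the HW is done with
+ * them.
+ */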
+static void pvr_sync_defer_free(struct pvr_sync_kernel_pair *kernel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
+ list_add_tail(&kernel->list, &sync_prim_free_list);
+ spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
+
+ queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
+}
+
+/* This function assumes the timeline_list_mutex is held while it runs */
+
+static void pvr_sync_destroy_timeline_locked(struct kref *kref)
+{
+ struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)
+ container_of(kref, struct pvr_sync_timeline, kref);
+
+ pvr_sync_defer_free(timeline->kernel);
+ list_del(&timeline->list);
+ kfree(timeline);
+}
+
+static void pvr_sync_destroy_timeline(struct kref *kref)
+{
+ mutex_lock(&timeline_list_mutex);
+ pvr_sync_destroy_timeline_locked(kref);
+ mutex_unlock(&timeline_list_mutex);
+}
+
+static void pvr_sync_release_timeline(struct sync_timeline *obj)
+{
+ struct pvr_sync_timeline *timeline = get_timeline(obj);
+
+ /* If pvr_sync_open() failed after calling sync_timeline_create(), this
+ * can be called with a timeline that has no timeline sync and has not
+ * been added to our timeline list. Use a NULL timeline to detect and
+ * handle this condition.
+ */
+ if (!timeline)
+ return;
+
+ DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+ wait_for_sync(timeline->kernel->fence_sync);
+
+ /* Whether or not we're the last reference, obj is going away
+ * after this function returns, so remove our back reference
+ * to it.
+ */
+ timeline->obj = NULL;
+
+ /* This might be the last reference to the timeline object.
+ * If so, we'll go ahead and delete it now.
+ */
+ kref_put(&timeline->kref, pvr_sync_destroy_timeline);
+}
+
+/* The print_obj() and print_pt() functions have been removed, so we're forced
+ * to use the timeline_value_str() and pt_value_str() functions. These are
+ * worse because we're limited to 64 characters, and the strings for sync
+ * pts have to be formatted like:
+ *
+ * pt active: pt_info / tl_info
+ *
+ * For us, the tl_info is complicated and doesn't need to be repeated over
+ * and over. So try to detect the way sync_print_pt() calls the two value_str
+ * functions and change what pvr_sync_timeline_value_str() returns dynamically.
+ */
+static struct sync_timeline *last_pt_timeline;
+
+static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+ char *str, int size)
+{
+ struct pvr_sync_timeline *timeline = get_timeline(sync_timeline);
+
+ if (sync_timeline != last_pt_timeline) {
+ snprintf(str, size, "%u 0x%x %u/%u",
+ timeline->kernel->fence_sync->id,
+ timeline->kernel->fence_sync->vaddr,
+ get_sync_value(timeline->kernel->fence_sync),
+ timeline->kernel->fence_sync->next_value);
+ } else {
+ snprintf(str, size, "%u",
+ get_sync_value(timeline->kernel->fence_sync));
+ }
+}
+
+static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
+{
+ struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+ struct pvr_sync_kernel_pair *kernel;
+
+ if (!pvr_pt->sync_data)
+ return;
+
+ kernel = pvr_pt->sync_data->kernel;
+
+ /* Messages must be at most 64 bytes (including the null terminator):
+ *
+ * 123456789012345678901234567890123456789012345678901234567890123
+ *
+ * ID FW ADDR C/N # REF TAKEN CLEANUP_COUNT
+ * 123456 0xdeadbeef 0/1 # r=2 123456 1
+ */
+ if (kernel) {
+ unsigned int cleanup_count = 0;
+ struct list_head *pos;
+
+ list_for_each(pos, &kernel->cleanup_sync_list) {
+ cleanup_count++;
+ }
+ snprintf(str, size,
+ "%u 0x%x %u/%u r=%d %u %u",
+ kernel->fence_sync->id,
+ kernel->fence_sync->vaddr,
+ get_sync_value(kernel->fence_sync),
+ kernel->fence_sync->next_value,
+ atomic_read(&pvr_pt->sync_data->kref.refcount),
+ cleanup_count,
+ pvr_pt->sync_data->timeline_update_value);
+
+ } else {
+ snprintf(str, size, "idle # r=%d %u",
+ atomic_read(&pvr_pt->sync_data->kref.refcount),
+ pvr_pt->sync_data->timeline_update_value);
+ }
+
+ last_pt_timeline = sync_pt_parent(sync_pt);
+}
+
+/* pvr_sync_create_sync_data() should be called with the bridge lock held */
+static struct pvr_sync_data *
+pvr_sync_create_sync_data(struct sync_timeline *obj)
+{
+ struct pvr_sync_data *sync_data = NULL;
+ enum PVRSRV_ERROR error;
+
+ sync_data = kzalloc(sizeof(*sync_data), GFP_KERNEL);
+ if (!sync_data)
+ goto err_out;
+
+ kref_init(&sync_data->kref);
+
+ sync_data->kernel =
+ kzalloc(sizeof(*sync_data->kernel),
+ GFP_KERNEL);
+
+ if (!sync_data->kernel)
+ goto err_free_data;
+
+ INIT_LIST_HEAD(&sync_data->kernel->cleanup_sync_list);
+
+ error = sync_pool_get(&sync_data->kernel->fence_sync,
+ obj->name, SYNC_PT_FENCE_TYPE);
+
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_free_kernel;
+ }
+
+err_out:
+ return sync_data;
+
+err_free_kernel:
+ kfree(sync_data->kernel);
+err_free_data:
+ kfree(sync_data);
+ sync_data = NULL;
+ goto err_out;
+}
+
+static void pvr_sync_free_sync_data(struct kref *kref)
+{
+ struct pvr_sync_data *sync_data = (struct pvr_sync_data *)
+ container_of(kref, struct pvr_sync_data, kref);
+
+ if (sync_data->kernel)
+ pvr_sync_defer_free(sync_data->kernel);
+ kfree(sync_data);
+}
+
+static void pvr_sync_free_sync(struct sync_pt *sync_pt)
+{
+ struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+ DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+ kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data);
+}
+
+/* this function uses pvr_sync_timeline_ops defined below */
+static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int);
+
+static struct sync_timeline_ops pvr_sync_timeline_ops = {
+ .driver_name = PVRSYNC_MODNAME,
+ .dup = pvr_sync_dup,
+ .has_signaled = pvr_sync_has_signaled,
+ .compare = pvr_sync_compare,
+ .free_pt = pvr_sync_free_sync,
+ .release_obj = pvr_sync_release_timeline,
+ .timeline_value_str = pvr_sync_timeline_value_str,
+ .pt_value_str = pvr_sync_pt_value_str,
+ .fill_driver_data = pvr_sync_fill_driver_data,
+};
+
+static inline bool is_pvr_timeline(struct sync_timeline *obj)
+{
+ return obj->ops == &pvr_sync_timeline_ops;
+}
+
+static inline bool is_pvr_timeline_pt(struct sync_pt *pt)
+{
+ return is_pvr_timeline(sync_pt_parent(pt));
+}
+
+static int
+pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size)
+{
+ struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data;
+ struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+ struct pvr_sync_data *sync_data = pvr_pt->sync_data;
+ struct pvr_sync_kernel_pair *kernel = sync_data->kernel;
+
+ if ((unsigned int)size < sizeof(*info))
+ return -ENOMEM;
+
+ info->ui32TlTaken = sync_data->timeline_update_value;
+
+ if (kernel) {
+ info->id = kernel->fence_sync->id;
+ info->ui32FWAddr = kernel->fence_sync->vaddr;
+ info->ui32CurrOp = get_sync_value(kernel->fence_sync);
+ info->ui32NextOp = kernel->fence_sync->next_value;
+ } else {
+ info->id = 0;
+ info->ui32FWAddr = 0;
+ info->ui32CurrOp = 0;
+ info->ui32NextOp = 0;
+ }
+
+ return sizeof(*info);
+}
+
+/* foreign sync handling */
+
+static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence,
+ struct sync_fence_waiter *_waiter)
+{
+ struct pvr_sync_fence_waiter *waiter =
+ (struct pvr_sync_fence_waiter *)_waiter;
+ unsigned long flags;
+
+ /* Complete the SW operation and free the sync if we can. If we can't,
+ * it will be checked by a later workqueue kick.
+ */
+ complete_sync(waiter->kernel->fence_sync);
+
+ /* We can 'put' the fence now, but this function might be called in
+ * irq context so we must defer to WQ.
+ * This WQ is triggered in pvr_sync_defer_free, so adding it to the
+ * put list before that should guarantee it's cleaned up on the next
+ * wq run.
+ */
+ spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+ list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list);
+ spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+ pvr_sync_defer_free(waiter->kernel);
+
+ /* The completed sw-sync may allow other tasks to complete,
+ * so we need to allow them to progress.
+ */
+ queue_work(pvr_sync_data.check_status_wq,
+ &pvr_sync_data.check_status_work);
+
+ kfree(waiter);
+}
+
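+/* Create a "shadow" sync prim (plus a cleanup sync) for a foreign fence and
+ * register an async waiter on it; the waiter callback completes the shadow
+ * sync when the foreign fence signals. Returns NULL, rolling everything
+ * back, if the fence is already signalled or in an error state.
+ */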
+static struct pvr_sync_kernel_pair *
+pvr_sync_create_waiter_for_foreign_sync(int fd)
+{
+ struct pvr_sync_native_sync_prim *cleanup_sync = NULL;
+ struct pvr_sync_kernel_pair *kernel = NULL;
+ struct pvr_sync_fence_waiter *waiter;
+ struct pvr_sync_fence *sync_fence;
+ struct sync_fence *fence;
+ enum PVRSRV_ERROR error;
+ int err;
+
+ fence = sync_fence_fdget(fd);
+ if (!fence) {
+ pr_err("pvr_sync: %s: Failed to take reference on fence\n",
+ __func__);
+ goto err_out;
+ }
+
+ kernel = kmalloc(sizeof(*kernel), GFP_KERNEL);
+ if (!kernel) {
+ pr_err("pvr_sync: %s: Failed to allocate sync kernel\n",
+ __func__);
+ goto err_put_fence;
+ }
+
+ INIT_LIST_HEAD(&kernel->cleanup_sync_list);
+
+ sync_fence = kmalloc(sizeof(*sync_fence), GFP_KERNEL);
+ if (!sync_fence) {
+ pr_err("pvr_sync: %s: Failed to allocate pvr sync fence\n",
+ __func__);
+ goto err_free_kernel;
+ }
+
+ sync_fence->fence = fence;
+
+ error = sync_pool_get(&kernel->fence_sync,
+ fence->name, SYNC_PT_FOREIGN_FENCE_TYPE);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_free_sync_fence;
+ }
+
+ kernel->fence_sync->next_value++;
+
+ error = sync_pool_get(&cleanup_sync, fence->name,
+ SYNC_PT_FOREIGN_CLEANUP_TYPE);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_free_sync;
+ }
+
+ cleanup_sync->next_value++;
+
+ list_add(&cleanup_sync->cleanup_list, &kernel->cleanup_sync_list);
+
+ /* The custom waiter structure is freed in the waiter callback */
+ waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter) {
+ pr_err("pvr_sync: %s: Failed to allocate waiter\n", __func__);
+ goto err_free_cleanup_sync;
+ }
+
+ waiter->kernel = kernel;
+ waiter->sync_fence = sync_fence;
+
+ sync_fence_waiter_init(&waiter->waiter,
+ pvr_sync_foreign_sync_pt_signaled);
+
+ err = sync_fence_wait_async(fence, &waiter->waiter);
+ if (err) {
+ if (err < 0) {
+ pr_err("pvr_sync: %s: Fence was in error state (%d)\n",
+ __func__, err);
+ /* Fall-thru */
+ }
+
+ /* -1 means the fence was broken, 1 means the fence already
+ * signalled. In either case, roll back what we've done and
+ * skip using this sync_pt for synchronization.
+ */
+ goto err_free_waiter;
+ }
+
+ kernel->current_cleanup_sync = cleanup_sync;
+
+err_out:
+ return kernel;
+err_free_waiter:
+ kfree(waiter);
+err_free_cleanup_sync:
+ list_del(&cleanup_sync->cleanup_list);
+ sync_pool_put(cleanup_sync);
+err_free_sync:
+ sync_pool_put(kernel->fence_sync);
+err_free_sync_fence:
+ kfree(sync_fence);
+err_free_kernel:
+ kfree(kernel);
+ kernel = NULL;
+err_put_fence:
+ sync_fence_put(fence);
+ goto err_out;
+}
+
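+/* Create a native sync point on a pvr_sync timeline. One operation is taken
+ * on the point's fence sync up front and, on success, one on the timeline
+ * sync; the fence sync operation is rolled back if sync_pt_create() fails.
+ */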
+static
+struct pvr_sync_pt *pvr_sync_create_pt(struct pvr_sync_timeline *timeline)
+{
+ struct pvr_sync_data *sync_data;
+ struct pvr_sync_pt *pvr_pt = NULL;
+
+ sync_data = pvr_sync_create_sync_data(timeline->obj);
+ if (!sync_data) {
+ pr_err("pvr_sync: %s: Failed to create sync data\n", __func__);
+ goto err_out;
+ }
+
+ sync_data->kernel->fence_sync->next_value++;
+
+ pvr_pt = (struct pvr_sync_pt *)
+ sync_pt_create(timeline->obj, sizeof(*pvr_pt));
+
+ if (!pvr_pt) {
+ pr_err("pvr_sync: %s: Failed to create sync pt\n", __func__);
+ goto err_rollback_fence;
+ }
+
+ pvr_pt->sync_data = sync_data;
+
+ /* Increment the timeline next value */
+ pvr_pt->sync_data->timeline_update_value =
+ timeline->kernel->fence_sync->next_value++;
+
+ return pvr_pt;
+
+err_rollback_fence:
+ sync_data->kernel->fence_sync->next_value--;
+ kref_put(&sync_data->kref, pvr_sync_free_sync_data);
+err_out:
+ return NULL;
+}
+
+/* Predeclare the pvr_sync_fops as it's used for comparison to ensure the
+ * update_timeline_fd passed in to pvr_sync_append_fences() is a pvr_sync
+ * timeline.
+ */
+static const struct file_operations pvr_sync_fops;
+
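+/* Merge the caller's check/update UFO lists with those implied by an
+ * optional check fence and an optional pvr_sync update timeline. For the
+ * update timeline a new sync point and fence are created (with an FD
+ * reserved up front); for each unsignalled native point on the check fence
+ * a cleanup sync is attached, and any unsignalled foreign points are
+ * shadowed via pvr_sync_create_waiter_for_foreign_sync(). The combined
+ * arrays are returned in *append_sync_data.
+ */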
+enum PVRSRV_ERROR pvr_sync_append_fences(
+ const char *name,
+ const s32 check_fence_fd,
+ const s32 update_timeline_fd,
+ const u32 nr_updates,
+ const struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addresses,
+ const u32 *update_values,
+ const u32 nr_checks,
+ const struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addresses,
+ const u32 *check_values,
+ struct pvr_sync_append_data **append_sync_data)
+{
+ struct pvr_sync_native_sync_prim **cleanup_sync_pos;
+ struct pvr_sync_pt *update_point = NULL;
+ struct sync_fence *update_fence = NULL;
+ struct pvr_sync_append_data *sync_data;
+ struct _RGXFWIF_DEV_VIRTADDR_ *update_address_pos;
+ struct _RGXFWIF_DEV_VIRTADDR_ *check_address_pos;
+ struct pvr_sync_timeline *timeline;
+ unsigned int num_used_sync_updates;
+ unsigned int num_used_sync_checks;
+ enum PVRSRV_ERROR err = PVRSRV_OK;
+ u32 *update_value_pos;
+ u32 *check_value_pos;
+
+ if ((nr_updates && (!update_ufo_addresses || !update_values)) ||
+ (nr_checks && (!check_ufo_addresses || !check_values))) {
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ sync_data =
+ kzalloc(sizeof(*sync_data), GFP_KERNEL);
+ if (!sync_data) {
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ sync_data->update_fence_fd = -1;
+
+ if (update_timeline_fd >= 0) {
+ struct file *timeline_file;
+
+ /* We reserve the update fence FD before taking any sync operations,
+ * as we do not want to fail (e.g. by running out of FDs) after the
+ * kick operation has been submitted to the hw.
+ */
+ sync_data->update_fence_fd = get_unused_fd();
+ if (sync_data->update_fence_fd < 0) {
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_append_data;
+ }
+
+ timeline_file = fget(update_timeline_fd);
+ if (!timeline_file) {
+ pr_err("pvr_sync: %s: Failed to open supplied timeline fd (%d)\n",
+ __func__, update_timeline_fd);
+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto err_free_append_data;
+ }
+
+ if (timeline_file->f_op != &pvr_sync_fops) {
+ pr_err("pvr_sync: %s: Supplied timeline not pvr_sync timeline\n",
+ __func__);
+ fput(timeline_file);
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_free_append_data;
+ }
+
+ timeline = get_timeline(timeline_file->private_data);
+
+ /* We know this will not free the timeline as the user still
+ * has the fd referencing it.
+ */
+ fput(timeline_file);
+
+ if (!timeline) {
+ pr_err("pvr_sync: %s: Supplied timeline has no private data\n",
+ __func__);
+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto err_free_append_data;
+ }
+
+ update_point = pvr_sync_create_pt(timeline);
+ if (!update_point) {
+ pr_err("pvr_sync: %s: Failed to create sync point\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_append_data;
+ }
+
+#if defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+ update_fence = sync_fence_create(name, &update_point->pt.base);
+#else
+ update_fence = sync_fence_create(name, &update_point->pt);
+#endif
+ if (!update_fence) {
+ struct pvr_sync_native_sync_prim *fence_prim =
+ update_point->sync_data->kernel->fence_sync;
+ struct pvr_sync_native_sync_prim *timeline_prim =
+ timeline->kernel->fence_sync;
+
+ pr_err("pvr_sync: %s: Failed to create sync fence\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+
+ /* If the point was created but the fence could not be
+ * created, the point must be freed manually, as no fence
+ * has taken ownership of it yet.
+ */
+
+ /* First rollback the point's taken operations */
+ timeline_prim->next_value--;
+ fence_prim->next_value--;
+ pvr_sync_free_sync(&update_point->pt);
+ goto err_free_append_data;
+ }
+
+ sync_data->update_fence = update_fence;
+ sync_data->update_sync =
+ update_point->sync_data->kernel->fence_sync;
+ sync_data->update_timeline_sync =
+ timeline->kernel->fence_sync;
+ }
+
+ sync_data->nr_checks = nr_checks;
+ sync_data->nr_updates = nr_updates;
+
+ if (check_fence_fd >= 0) {
+ struct sync_fence *fence = sync_fence_fdget(check_fence_fd);
+ struct pvr_sync_kernel_pair *sync_kernel;
+ unsigned int points_on_fence = 0;
+ bool has_foreign_point = false;
+ struct sync_pt *sync_pt;
+ int j;
+
+ if (!fence) {
+ pr_err("pvr_sync: %s: Failed to read sync private data for fd %d\n",
+ __func__, check_fence_fd);
+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto err_free_fence;
+ }
+
+ sync_data->check_fence = fence;
+
+ (void)j;
+ for_each_sync_pt(sync_pt, fence, j) {
+ struct pvr_sync_native_sync_prim *cleanup_sync = NULL;
+ struct pvr_sync_pt *pvr_pt;
+
+ if (!is_pvr_timeline_pt(sync_pt)) {
+ if (!sync_pt_get_status(sync_pt))
+ has_foreign_point = true;
+ continue;
+ }
+
+ pvr_pt = (struct pvr_sync_pt *)sync_pt;
+ sync_kernel = pvr_pt->sync_data->kernel;
+
+ if (!sync_kernel ||
+ is_sync_met(sync_kernel->fence_sync)) {
+ continue;
+ }
+
+ /* We will use the above sync for "check" only. In this
+ * case also insert a "cleanup" update command into the
+ * opengl stream. This can later be used for checking
+ * if the sync prim could be freed.
+ */
+ err = sync_pool_get(&cleanup_sync,
+ sync_pt_parent(&pvr_pt->pt)->name,
+ SYNC_PT_CLEANUP_TYPE);
+ if (err != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)\n",
+ __func__,
+ PVRSRVGetErrorStringKM(err));
+ goto err_free_append_data;
+ }
+ list_add(&cleanup_sync->cleanup_list,
+ &sync_kernel->cleanup_sync_list);
+ sync_kernel->current_cleanup_sync = cleanup_sync;
+ points_on_fence++;
+ }
+
+ if (has_foreign_point)
+ points_on_fence++;
+
+ /* Each point has 1 check value, and 1 update value (for the
+ * cleanup fence).
+ */
+ sync_data->nr_checks += points_on_fence;
+ sync_data->nr_updates += points_on_fence;
+ sync_data->nr_cleanup_syncs += points_on_fence;
+ }
+
+ if (update_point) {
+ /* A fence update requires 2 update values (fence and timeline)
+ */
+ sync_data->nr_updates += 2;
+ }
+
+ if (sync_data->nr_updates > 0) {
+ sync_data->update_ufo_addresses =
+ kzalloc(sizeof(*sync_data->update_ufo_addresses) *
+ sync_data->nr_updates, GFP_KERNEL);
+ if (!sync_data->update_ufo_addresses) {
+ pr_err("pvr_sync: %s: Failed to allocate update UFO address list\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_fence;
+ }
+
+ sync_data->update_values =
+ kzalloc(sizeof(*sync_data->update_values) *
+ sync_data->nr_updates, GFP_KERNEL);
+ if (!sync_data->update_values) {
+ pr_err("pvr_sync: %s: Failed to allocate update value list\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_fence;
+ }
+ }
+
+ if (sync_data->nr_checks > 0) {
+
+ sync_data->check_ufo_addresses =
+ kzalloc(sizeof(*sync_data->check_ufo_addresses) *
+ sync_data->nr_checks, GFP_KERNEL);
+ if (!sync_data->check_ufo_addresses) {
+ pr_err("pvr_sync: %s: Failed to allocate check UFO address list\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_fence;
+ }
+
+ sync_data->check_values =
+ kzalloc(sizeof(*sync_data->check_values) *
+ sync_data->nr_checks, GFP_KERNEL);
+ if (!sync_data->check_values) {
+ pr_err("pvr_sync: %s: Failed to allocate check value list\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_fence;
+ }
+ }
+
+ if (sync_data->nr_cleanup_syncs > 0) {
+ sync_data->cleanup_syncs =
+ kzalloc(sizeof(*sync_data->cleanup_syncs) *
+ sync_data->nr_cleanup_syncs, GFP_KERNEL);
+ if (!sync_data->cleanup_syncs) {
+ pr_err("pvr_sync: %s: Failed to allocate cleanup rollback list\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_fence;
+ }
+ }
+
+ update_address_pos = sync_data->update_ufo_addresses;
+ update_value_pos = sync_data->update_values;
+ check_address_pos = sync_data->check_ufo_addresses;
+ check_value_pos = sync_data->check_values;
+ cleanup_sync_pos = sync_data->cleanup_syncs;
+
+ /* Everything should be allocated/sanity checked. No errors are
+ * possible after this point.
+ */
+
+ /* Append any check syncs */
+ if (sync_data->check_fence) {
+ struct sync_fence *fence = sync_data->check_fence;
+ bool has_foreign_point = false;
+ struct sync_pt *sync_pt;
+ int j;
+
+ (void)j;
+ for_each_sync_pt(sync_pt, fence, j) {
+ struct pvr_sync_pt *pvr_pt;
+ struct pvr_sync_kernel_pair *sync_kernel;
+
+ if (!is_pvr_timeline_pt(sync_pt)) {
+ if (!sync_pt_get_status(sync_pt))
+ has_foreign_point = true;
+ continue;
+ }
+
+ pvr_pt = (struct pvr_sync_pt *)sync_pt;
+ sync_kernel = pvr_pt->sync_data->kernel;
+
+ if (!sync_kernel ||
+ is_sync_met(sync_kernel->fence_sync)) {
+ continue;
+ }
+
+ (*check_address_pos++).ui32Addr =
+ sync_kernel->fence_sync->vaddr;
+ *check_value_pos++ =
+ sync_kernel->fence_sync->next_value;
+
+ (*update_address_pos++).ui32Addr =
+ sync_kernel->current_cleanup_sync->vaddr;
+ *update_value_pos++ =
+ ++sync_kernel->current_cleanup_sync->next_value;
+ *cleanup_sync_pos++ = sync_kernel->current_cleanup_sync;
+
+ sync_kernel->current_cleanup_sync = NULL;
+ }
+
+ if (has_foreign_point) {
+ struct pvr_sync_kernel_pair *foreign_sync_kernel =
+ pvr_sync_create_waiter_for_foreign_sync(
+ check_fence_fd);
+
+ if (foreign_sync_kernel) {
+ struct pvr_sync_native_sync_prim *fence_sync =
+ foreign_sync_kernel->fence_sync;
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ foreign_sync_kernel->
+ current_cleanup_sync;
+
+ (*check_address_pos++).ui32Addr =
+ fence_sync->vaddr;
+ *check_value_pos++ =
+ fence_sync->next_value;
+
+ (*update_address_pos++).ui32Addr =
+ cleanup_sync->vaddr;
+ *update_value_pos++ =
+ ++cleanup_sync->next_value;
+ *cleanup_sync_pos++ = cleanup_sync;
+ foreign_sync_kernel->current_cleanup_sync =
+ NULL;
+ }
+ }
+ }
+
+ /* Append the update sync (if requested) */
+ if (update_point) {
+ struct pvr_sync_data *point_sync_data =
+ update_point->sync_data;
+ struct pvr_sync_kernel_pair *sync_kernel =
+ point_sync_data->kernel;
+
+ (*update_address_pos++).ui32Addr =
+ sync_kernel->fence_sync->vaddr;
+ *update_value_pos++ =
+ sync_kernel->fence_sync->next_value;
+
+ (*update_address_pos++).ui32Addr =
+ timeline->kernel->fence_sync->vaddr;
+
+ /* Copy in the timeline next value (which was incremented
+ * when this point was created).
+ */
+ point_sync_data->timeline_update_value =
+ timeline->kernel->fence_sync->next_value;
+
+ /* ...and set that to be updated when this kick is completed */
+ *update_value_pos++ =
+ point_sync_data->timeline_update_value;
+ }
+
+ /* Count the total number of sync points actually attached: some may
+ * have completed since the first loop, or a foreign-point waiter may
+ * have been skipped. (Points can never become un-complete, so the count
+ * can only stay the same or shrink, and the arrays allocated above
+ * remain large enough.)
+ */
+ num_used_sync_updates =
+ update_address_pos - sync_data->update_ufo_addresses;
+ num_used_sync_checks =
+ check_address_pos - sync_data->check_ufo_addresses;
+
+ sync_data->nr_checks = nr_checks + num_used_sync_checks;
+ sync_data->nr_updates = nr_updates + num_used_sync_updates;
+
+ /* Append original check and update sync values/addresses */
+ if (update_ufo_addresses)
+ memcpy(update_address_pos, update_ufo_addresses,
+ sizeof(*update_ufo_addresses) * nr_updates);
+ if (update_values)
+ memcpy(update_value_pos, update_values,
+ sizeof(*update_values) * nr_updates);
+
+ if (check_ufo_addresses)
+ memcpy(check_address_pos, check_ufo_addresses,
+ sizeof(*check_ufo_addresses) * nr_checks);
+ if (check_values)
+ memcpy(check_value_pos, check_values,
+ sizeof(*check_values) * nr_checks);
+
+ *append_sync_data = sync_data;
+
+ return PVRSRV_OK;
+
+err_free_fence:
+ if (update_point) {
+ /* First rollback the taken operations */
+ timeline->kernel->fence_sync->next_value--;
+ update_point->sync_data->kernel->fence_sync->next_value--;
+ }
+err_free_append_data:
+ pvr_sync_free_append_fences_data(sync_data);
+err_out:
+ return err;
+}
+
+void pvr_sync_get_updates(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences, struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs, u32 **values)
+{
+ *nr_fences = sync_data->nr_updates;
+ *ufo_addrs = sync_data->update_ufo_addresses;
+ *values = sync_data->update_values;
+}
+
+void pvr_sync_get_checks(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences, struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs, u32 **values)
+{
+ *nr_fences = sync_data->nr_checks;
+ *ufo_addrs = sync_data->check_ufo_addresses;
+ *values = sync_data->check_values;
+}
+
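+/* Roll back the next_value increments taken by pvr_sync_append_fences():
+ * each attached cleanup sync, the update sync and the timeline sync are
+ * stepped back so the un-submitted operations are no longer expected.
+ */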
+void pvr_sync_rollback_append_fences(struct pvr_sync_append_data *sync_data)
+{
+ u32 i;
+
+ if (!sync_data)
+ return;
+
+ for (i = 0; i < sync_data->nr_cleanup_syncs; i++) {
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ sync_data->cleanup_syncs[i];
+
+ /* If this cleanup was called on a partially-created data set
+ * it's possible to have NULL cleanup sync pointers.
+ */
+ if (!cleanup_sync)
+ continue;
+ cleanup_sync->next_value--;
+ }
+
+ /* If there was an update, rollback the next values taken on the
+ * fence and timeline. This must be done before the sync_fence_put()
+ * as that may free the corresponding fence.
+ */
+
+ if (sync_data->update_sync) {
+ BUG_ON(sync_data->update_sync->next_value != 1);
+ sync_data->update_sync->next_value = 0;
+ sync_data->update_sync = NULL;
+ }
+
+ if (sync_data->update_timeline_sync) {
+ BUG_ON(sync_data->update_timeline_sync->next_value == 0);
+ sync_data->update_timeline_sync->next_value--;
+ sync_data->update_timeline_sync = NULL;
+ }
+}
+
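+/* Install the update fence on the FD reserved by pvr_sync_append_fences()
+ * and hand ownership of both to the caller. Returns -EINVAL if there is no
+ * update fence or no reserved FD.
+ */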
+int pvr_sync_get_update_fd(struct pvr_sync_append_data *sync_data)
+{
+ int fd = -EINVAL;
+
+ if (!sync_data || !sync_data->update_fence ||
+ sync_data->update_fence_fd < 0)
+ goto err_out;
+
+ fd = sync_data->update_fence_fd;
+ sync_data->update_fence_fd = -1;
+
+ sync_fence_install(sync_data->update_fence, fd);
+
+ /* Note: It is invalid to install an FD on the update fence and then
+ * call fput() on it, as this would leave a dangling reference in the
+ * FD table. Set it to NULL so the free_append_fences_data() call
+ * doesn't fput it.
+ */
+ sync_data->update_fence = NULL;
+
+err_out:
+ return fd;
+}
+
+void pvr_sync_free_append_fences_data(struct pvr_sync_append_data *sync_data)
+{
+ if (!sync_data)
+ return;
+
+ if (sync_data->check_fence)
+ sync_fence_put(sync_data->check_fence);
+
+ if (sync_data->update_fence)
+ sync_fence_put(sync_data->update_fence);
+
+ if (sync_data->update_fence_fd >= 0)
+ put_unused_fd(sync_data->update_fence_fd);
+
+ kfree(sync_data->update_ufo_addresses);
+ kfree(sync_data->update_values);
+ kfree(sync_data->check_ufo_addresses);
+ kfree(sync_data->check_values);
+ kfree(sync_data->cleanup_syncs);
+ kfree(sync_data);
+}
+
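+/* Complete every sync that the appended kick would have updated (cleanup,
+ * update and timeline syncs) and re-check all timelines. Intended for
+ * no-hardware configurations, where nothing else will perform the updates.
+ */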
+void pvr_sync_nohw_complete_fences(struct pvr_sync_append_data *sync_data)
+{
+ u32 i;
+
+ if (!sync_data)
+ return;
+
+ for (i = 0; i < sync_data->nr_cleanup_syncs; i++) {
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ sync_data->cleanup_syncs[i];
+
+ if (!cleanup_sync)
+ continue;
+
+ complete_sync(cleanup_sync);
+ }
+
+ if (sync_data->update_sync)
+ complete_sync(sync_data->update_sync);
+ if (sync_data->update_timeline_sync)
+ complete_sync(sync_data->update_timeline_sync);
+
+ pvr_sync_update_all_timelines(NULL);
+}
+
+/* ioctl and fops handling */
+
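+/* Open handler for the pvr_sync misc device: create a sync_timeline
+ * wrapper named after the calling task, allocate the separately refcounted
+ * pvr_sync_timeline with its timeline sync, and add it to the global
+ * timeline list.
+ */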
+static int pvr_sync_open(struct inode *inode, struct file *file)
+{
+ struct pvr_sync_timeline_wrapper *timeline_wrapper;
+ struct pvr_sync_timeline *timeline;
+ char task_comm[TASK_COMM_LEN];
+ enum PVRSRV_ERROR error;
+ int err = -ENOMEM;
+
+ get_task_comm(task_comm, current);
+
+ timeline_wrapper = (struct pvr_sync_timeline_wrapper *)
+ sync_timeline_create(&pvr_sync_timeline_ops,
+ sizeof(*timeline_wrapper), task_comm);
+ if (!timeline_wrapper) {
+ pr_err("pvr_sync: %s: sync_timeline_create failed\n", __func__);
+ goto err_out;
+ }
+
+ timeline = kmalloc(sizeof(*timeline), GFP_KERNEL);
+ if (!timeline) {
+ pr_err("pvr_sync: %s: Out of memory\n", __func__);
+ goto err_free_timeline_wrapper;
+ }
+
+ timeline->kernel = kzalloc(sizeof(*timeline->kernel),
+ GFP_KERNEL);
+ if (!timeline->kernel) {
+ pr_err("pvr_sync: %s: Out of memory\n", __func__);
+ goto err_free_timeline;
+ }
+
+ INIT_LIST_HEAD(&timeline->kernel->cleanup_sync_list);
+
+ OSAcquireBridgeLock();
+ error = sync_pool_get(&timeline->kernel->fence_sync,
+ task_comm, SYNC_TL_TYPE);
+ OSReleaseBridgeLock();
+
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_free_timeline_kernel;
+ }
+
+ timeline_wrapper->timeline = timeline;
+
+ timeline->obj = &timeline_wrapper->obj;
+ kref_init(&timeline->kref);
+
+ mutex_lock(&timeline_list_mutex);
+ list_add_tail(&timeline->list, &timeline_list);
+ mutex_unlock(&timeline_list_mutex);
+
+ DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+ file->private_data = timeline_wrapper;
+ err = 0;
+err_out:
+ return err;
+
+err_free_timeline_kernel:
+ kfree(timeline->kernel);
+err_free_timeline:
+ kfree(timeline);
+
+ /* Use a NULL timeline to detect this partially-setup timeline in the
+ * timeline release function (called by sync_timeline_destroy) and
+ * handle it appropriately.
+ */
+ timeline_wrapper->timeline = NULL;
+err_free_timeline_wrapper:
+ sync_timeline_destroy(&timeline_wrapper->obj);
+ goto err_out;
+}
+
+static int pvr_sync_close(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ if (is_pvr_timeline(obj)) {
+ DPF("%s: # %s", __func__,
+ debug_info_timeline(get_timeline(obj)));
+ }
+
+ sync_timeline_destroy(obj);
+ return 0;
+}
+
+static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline,
+ void __user *user_data)
+{
+ int err = 0;
+ struct pvr_sync_rename_ioctl_data data;
+
+ if (!access_ok(VERIFY_READ, user_data, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ if (copy_from_user(&data, user_data, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ data.szName[sizeof(data.szName) - 1] = '\0';
+ strlcpy(timeline->obj->name, data.szName, sizeof(timeline->obj->name));
+
+ mutex_lock(&sync_pool_mutex);
+ strlcpy(timeline->kernel->fence_sync->class, data.szName,
+ sizeof(timeline->kernel->fence_sync->class));
+ mutex_unlock(&sync_pool_mutex);
+err:
+ return err;
+}
+
+#ifndef CONFIG_SW_SYNC_USER
+
+static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
+ void **private_data)
+{
+ struct sw_sync_timeline *sw_sync_timeline;
+
+ /* We can only convert an empty GPU timeline */
+ if (timeline->kernel->fence_sync->next_value)
+ return -EFAULT;
+
+ /* Create a sw_sync timeline with the old GPU timeline's name */
+ sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name);
+ if (!sw_sync_timeline)
+ return -ENOMEM;
+
+ /* Destroy the old GPU timeline and update the struct file */
+ DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+ sync_timeline_destroy(timeline->obj);
+ *private_data = sw_sync_timeline;
+ return 0;
+}
+
+static long pvr_sync_ioctl_sw_create_fence(struct sw_sync_timeline *timeline,
+ void __user *user_data)
+{
+ struct sw_sync_create_fence_data data;
+ struct sync_fence *fence;
+ int fd = get_unused_fd();
+ struct sync_pt *sync_pt;
+ int err = -EFAULT;
+
+ if (fd < 0) {
+ pr_err("pvr_sync: %s: Failed to find unused fd (%d)\n",
+ __func__, fd);
+ goto err_out;
+ }
+
+ if (copy_from_user(&data, user_data, sizeof(data)))
+ goto err_put_fd;
+
+ sync_pt = sw_sync_pt_create(timeline, data.value);
+ if (!sync_pt) {
+ pr_err("pvr_sync: %s: Failed to create a sync point (%d)\n",
+ __func__, fd);
+ err = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+#if defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+ fence = sync_fence_create(data.name, &sync_pt->base);
+#else
+ fence = sync_fence_create(data.name, sync_pt);
+#endif
+ if (!fence) {
+ pr_err("pvr_sync: %s: Failed to create a fence (%d)\n",
+ __func__, fd);
+ sync_pt_free(sync_pt);
+ err = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ data.fence = fd;
+
+ if (copy_to_user(user_data, &data, sizeof(data)))
+ goto err_put_fence;
+
+ sync_fence_install(fence, fd);
+ err = 0;
+err_out:
+ return err;
+err_put_fence:
+ sync_fence_put(fence);
+err_put_fd:
+ put_unused_fd(fd);
+ goto err_out;
+}
+
+static long pvr_sync_ioctl_sw_inc(struct sw_sync_timeline *timeline,
+ void __user *user_data)
+{
+ u32 value;
+
+ if (copy_from_user(&value, user_data, sizeof(value)))
+ return -EFAULT;
+
+ sw_sync_timeline_inc(timeline, value);
+ return 0;
+}
+
+#endif /* !CONFIG_SW_SYNC_USER */
+
+static long
+pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long __user arg)
+{
+ struct sync_timeline *obj = file->private_data;
+ void __user *user_data = (void __user *)arg;
+ long err = -ENOTTY;
+
+ if (is_pvr_timeline(obj)) {
+ struct pvr_sync_timeline *pvr = get_timeline(obj);
+
+ switch (cmd) {
+ case PVR_SYNC_IOC_RENAME:
+ err = pvr_sync_ioctl_rename(pvr, user_data);
+ break;
+#ifndef CONFIG_SW_SYNC_USER
+ case PVR_SYNC_IOC_FORCE_SW_ONLY:
+ err = pvr_sync_ioctl_force_sw_only(pvr,
+ &file->private_data);
+ break;
+#endif /* !CONFIG_SW_SYNC_USER */
+ default:
+ break;
+ }
+ } else {
+#ifndef CONFIG_SW_SYNC_USER
+ struct sw_sync_timeline *sw = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ err = pvr_sync_ioctl_sw_create_fence(sw, user_data);
+ break;
+ case SW_SYNC_IOC_INC:
+ err = pvr_sync_ioctl_sw_inc(sw, user_data);
+ break;
+ default:
+ break;
+ }
+#endif /* !CONFIG_SW_SYNC_USER */
+ }
+
+ return err;
+}
+
+static void
+pvr_sync_check_status_work_queue_function(struct work_struct *data)
+{
+ /* A completed SW operation may un-block the GPU */
+ PVRSRVCheckStatus(NULL);
+}
+
+/* Returns true if the freelist still has entries, else false if empty */
+static bool
+pvr_sync_clean_freelist(void)
+{
+ struct pvr_sync_kernel_pair *kernel, *k;
+ struct pvr_sync_fence *sync_fence, *f;
+ LIST_HEAD(unlocked_free_list);
+ unsigned long flags;
+ bool freelist_empty;
+
+ /* We can't call PVRSRVServerSyncFreeKM directly in this loop because
+ * that will take the mmap mutex. We can't take mutexes while we have
+ * this list locked with a spinlock. So move all the items we want to
+ * free to another, local list (no locking required) and process it
+ * in a second loop.
+ */
+
+ spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
+ list_for_each_entry_safe(kernel, k, &sync_prim_free_list, list) {
+ bool in_use = false;
+ struct list_head *pos;
+
+ /* Check if this sync is not used anymore. */
+ if (!is_sync_met(kernel->fence_sync))
+ continue;
+ list_for_each(pos, &kernel->cleanup_sync_list) {
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ list_entry(pos,
+ struct pvr_sync_native_sync_prim,
+ cleanup_list);
+
+ if (!is_sync_met(cleanup_sync)) {
+ in_use = true;
+ break;
+ }
+ }
+
+ if (in_use)
+ continue;
+
+ /* Remove the entry from the free list. */
+ list_move_tail(&kernel->list, &unlocked_free_list);
+ }
+
+ /* Wait and loop if there are still syncs on the free list (i.e. they
+ * are still in use by the HW).
+ */
+ freelist_empty = list_empty(&sync_prim_free_list);
+
+ spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
+
+ OSAcquireBridgeLock();
+
+ list_for_each_entry_safe(kernel, k, &unlocked_free_list, list) {
+ struct list_head *pos, *n;
+
+ list_del(&kernel->list);
+
+ sync_pool_put(kernel->fence_sync);
+
+ list_for_each_safe(pos, n, &kernel->cleanup_sync_list) {
+ struct pvr_sync_native_sync_prim *cleanup_sync =
+ list_entry(pos,
+ struct pvr_sync_native_sync_prim,
+ cleanup_list);
+ list_del(&cleanup_sync->cleanup_list);
+ sync_pool_put(cleanup_sync);
+ }
+ kfree(kernel);
+ }
+
+ OSReleaseBridgeLock();
+
+ /* sync_fence_put() must be called from process/WQ context
+ * because it uses fput(), which is not allowed to be called
+ * from interrupt context in kernels <3.6.
+ */
+ INIT_LIST_HEAD(&unlocked_free_list);
+
+ spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+ list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) {
+ list_move_tail(&sync_fence->list, &unlocked_free_list);
+ }
+ spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+ list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) {
+ list_del(&sync_fence->list);
+ sync_fence_put(sync_fence->fence);
+ kfree(sync_fence);
+ }
+
+ return !freelist_empty;
+}
+
+static void
+pvr_sync_defer_free_work_queue_function(struct work_struct *data)
+{
+ enum PVRSRV_ERROR error = PVRSRV_OK;
+ void *event_object;
+
+ error = OSEventObjectOpen(pvr_sync_data.event_object_handle,
+ &event_object);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Error opening event object (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ return;
+ }
+
+ while (pvr_sync_clean_freelist()) {
+ error = OSEventObjectWait(event_object);
+
+ switch (error) {
+ case PVRSRV_OK:
+ case PVRSRV_ERROR_TIMEOUT:
+ /* Timeout is normal behaviour */
+ continue;
+ default:
+ pr_err("pvr_sync: %s: Error waiting for event object (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ break;
+ }
+ }
+ error = OSEventObjectClose(event_object);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Error closing event object (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ }
+}
+
+static const struct file_operations pvr_sync_fops = {
+ .owner = THIS_MODULE,
+ .open = pvr_sync_open,
+ .release = pvr_sync_close,
+ .unlocked_ioctl = pvr_sync_ioctl,
+ .compat_ioctl = pvr_sync_ioctl,
+};
+
+static struct miscdevice pvr_sync_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = PVRSYNC_MODNAME,
+ .fops = &pvr_sync_fops,
+};
+
+static
+void pvr_sync_update_all_timelines(void *command_complete_handle)
+{
+ struct pvr_sync_timeline *timeline, *n;
+
+ mutex_lock(&timeline_list_mutex);
+
+ list_for_each_entry(timeline, &timeline_list, list) {
+ /* If a timeline is destroyed via pvr_sync_release_timeline()
+ * in parallel with a call to pvr_sync_update_all_timelines(),
+ * the timeline_list_mutex will block destruction of the
+ * 'timeline' pointer. Use kref_get_unless_zero() to detect
+ * and handle this race. Skip the timeline if it's being
+ * destroyed, blocked only on the timeline_list_mutex.
+ */
+ timeline->valid =
+ kref_get_unless_zero(&timeline->kref) ? true : false;
+ }
+
+ list_for_each_entry_safe(timeline, n, &timeline_list, list) {
+ /* We know timeline is valid at this point because we're
+ * holding the list lock (so pvr_sync_destroy_timeline() has
+ * to wait).
+ */
+ void *obj = timeline->obj;
+
+ /* If we're racing with pvr_sync_release_timeline(), ignore */
+ if (!timeline->valid)
+ continue;
+
+ /* If syncs have signaled on the GPU, echo this in pvr_sync.
+ *
+ * At this point we know the timeline is valid, but obj might
+ * have raced and been set to NULL. It's only important that
+ * we use NULL / non-NULL consistently with the if() and call
+ * to sync_timeline_signal() -- the timeline->obj can't be
+ * freed (pvr_sync_release_timeline() will be stuck waiting
+ * for the timeline_list_mutex) but it might have been made
+ * invalid by the base sync driver, in which case this call
+ * will bounce harmlessly.
+ */
+ if (obj)
+ sync_timeline_signal(obj);
+
+ /* We're already holding the timeline_list_mutex */
+ kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked);
+ }
+
+ mutex_unlock(&timeline_list_mutex);
+}
+
+enum PVRSRV_ERROR pvr_sync_init(void *device_cookie)
+{
+ enum PVRSRV_ERROR error;
+ int err;
+
+ DPF("%s", __func__);
+
+ atomic_set(&pvr_sync_data.sync_id, 0);
+
+ error = PVRSRVAcquireGlobalEventObjectKM(
+ &pvr_sync_data.event_object_handle);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to acquire global event object (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_out;
+ }
+
+ OSAcquireBridgeLock();
+
+ error = SyncPrimContextCreate(device_cookie,
+ &pvr_sync_data.sync_prim_context);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to create sync prim context (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ OSReleaseBridgeLock();
+ goto err_release_event_object;
+ }
+
+ OSReleaseBridgeLock();
+
+ pvr_sync_data.defer_free_wq =
+ create_freezable_workqueue("pvr_sync_defer_free_workqueue");
+ if (!pvr_sync_data.defer_free_wq) {
+ pr_err("pvr_sync: %s: Failed to create pvr_sync defer_free workqueue\n",
+ __func__);
+ goto err_free_sync_context;
+ }
+
+ INIT_WORK(&pvr_sync_data.defer_free_work,
+ pvr_sync_defer_free_work_queue_function);
+
+ pvr_sync_data.check_status_wq =
+ create_freezable_workqueue("pvr_sync_check_status_workqueue");
+ if (!pvr_sync_data.check_status_wq) {
+ pr_err("pvr_sync: %s: Failed to create pvr_sync check_status workqueue\n",
+ __func__);
+ goto err_destroy_defer_free_wq;
+ }
+
+ INIT_WORK(&pvr_sync_data.check_status_work,
+ pvr_sync_check_status_work_queue_function);
+ error = PVRSRVRegisterCmdCompleteNotify(
+ &pvr_sync_data.command_complete_handle,
+ &pvr_sync_update_all_timelines,
+ &device_cookie);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to register MISR notification (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_destroy_status_wq;
+ }
+
+ error = PVRSRVRegisterDbgRequestNotify(
+ &pvr_sync_data.debug_notify_handle,
+ device_cookie,
+ pvr_sync_debug_request,
+ DEBUG_REQUEST_ANDROIDSYNC,
+ NULL);
+ if (error != PVRSRV_OK) {
+ pr_err("pvr_sync: %s: Failed to register debug notifier (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_unregister_cmd_complete;
+ }
+
+ err = misc_register(&pvr_sync_device);
+ if (err) {
+ pr_err("pvr_sync: %s: Failed to register pvr_sync device (%d)\n",
+ __func__, err);
+ error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ goto err_unregister_dbg;
+ }
+
+ error = PVRSRV_OK;
+ return error;
+
+err_unregister_dbg:
+ PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
+err_unregister_cmd_complete:
+ PVRSRVUnregisterCmdCompleteNotify(
+ pvr_sync_data.command_complete_handle);
+err_destroy_status_wq:
+ destroy_workqueue(pvr_sync_data.check_status_wq);
+err_destroy_defer_free_wq:
+ destroy_workqueue(pvr_sync_data.defer_free_wq);
+err_free_sync_context:
+ OSAcquireBridgeLock();
+ SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+ OSReleaseBridgeLock();
+err_release_event_object:
+ PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle);
+err_out:
+
+ return error;
+}
+
+void pvr_sync_deinit(void)
+{
+ DPF("%s", __func__);
+
+ misc_deregister(&pvr_sync_device);
+
+ PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
+
+ PVRSRVUnregisterCmdCompleteNotify(
+ pvr_sync_data.command_complete_handle);
+
+ /* This will drain the workqueue, so we guarantee that all deferred
+ * syncs are freed before returning.
+ */
+ destroy_workqueue(pvr_sync_data.defer_free_wq);
+ destroy_workqueue(pvr_sync_data.check_status_wq);
+
+ OSAcquireBridgeLock();
+
+ sync_pool_clear();
+
+ SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+
+ OSReleaseBridgeLock();
+
+ PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle);
+}
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File pvr_sync.h
+@Title Kernel driver for Android's sync mechanism
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_SYNC_H
+#define _PVR_SYNC_H
+
+#include "pvr_fd_sync_kernel.h"
+
+/* Services internal interface */
+enum PVRSRV_ERROR pvr_sync_init(void *device_cookie);
+void pvr_sync_deinit(void);
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+struct pvr_sync_append_data;
+
+enum PVRSRV_ERROR
+pvr_sync_append_fences(
+ const char *name,
+ const s32 check_fence_fd,
+ const s32 update_timeline_fd,
+ const u32 nr_updates,
+ const struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addresses,
+ const u32 *update_values,
+ const u32 nr_checks,
+ const struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addresses,
+ const u32 *check_values,
+ struct pvr_sync_append_data **append_sync_data);
+
+void pvr_sync_get_updates(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences,
+ struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs,
+ u32 **values);
+void pvr_sync_get_checks(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences,
+ struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs,
+ u32 **values);
+
+void pvr_sync_rollback_append_fences(struct pvr_sync_append_data *sync_data);
+void pvr_sync_nohw_complete_fences(struct pvr_sync_append_data *sync_data);
+void pvr_sync_free_append_fences_data(struct pvr_sync_append_data *sync_data);
+int pvr_sync_get_update_fd(struct pvr_sync_append_data *sync_data);
+
+#endif /* _PVR_SYNC_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Version numbers and strings.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Version numbers and strings for PVR Consumer services
+ components.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRVERSION_H_
+#define _PVRVERSION_H_
+
+/*
+ * Rogue KM Version Note
+ *
+ * L 1.17:
+ * Support the case where GPU DVFS is disabled.
+ * Add rk_tf_check_version for compatibility with rk3328.
+ * L 1.18:
+ * If the frequency is fixed, don't force it to drop to the lowest level.
+ *
+ * M 1.21:
+ * Merge 1.5_RTM3604260 DDK code.
+ * M 1.24:
+ * Merge 1.5_ED3653583 DDK code.
+ * M 1.28:
+ * Merge 1.5_ED3776568 DDK code.
+ * M 1.29
+ * 1. Re-enable bEnableRDPowIsland since the splash screen doesn't appear when clicking the drawer button.
+ * 2. Don't set PVR_ANDROID_HAS_SET_BUFFERS_DATASPACE by default.
+ * 3. Remove hGPUUtilLock to avoid a deadlock.
+ * 4. Get raw ion_device by IonDevAcquire.
+ * M 1.31
+ * 1. Merge 1.5_ED3830101 DDK code.
+ * M 1.31+
+ * 1. Let Rogue M support kernel 4.4.
+ * 2. Close OPEN_GPU_PD temporarily.
+ * M 2.00
+ * Init 1.6_ED3859696 DDK code.
+ * M 2.01
+ * 1. Merge 1.6_ED3861161 DDK code
+ * 2. Add GPU dvfs support.
+ * 3. Adjust the code indentation.
+ * 4. Add GPU pd support.
+ * 5. Disable RD power island.
+ * M 3.00
+ * 1. Merge 1.7_ED3904583 DDK code.
+ * 2. Add support for kernel 3.10.
+ * 3. Fix some compile errors on DDK 1.7.
+ * 4. Fix some runtime errors on DDK 1.7.
+ * M 3.01
+ * 1. Merge 1.7_ED3957769 DDK code.
+ * 2. Fix compile error.
+ * 3. Fix USE_CLANG bug in preconfig.mk of km module.
+ * 4. Fix a bug caused by missing brackets.
+ * 5. Adjust the order of checks used to identify the Rogue GPU.
+ * N 4.00
+ * 1. Merge 1.7_Beta_4200570 DDK code.
+ * 2. Support for Android N.
+ * 3. Fix rk dvfs bug.
+ * 4. Adjust code style for rk init.
+ * N 4.01
+ * 1. Fix rk_init compile error.
+ * N 4.02
+ * 1. Merge 1.7_ED4215145 DDK code.
+ * 2. Merge 1.7_ED4239735 DDK code.
+ * 3. Merge 1.7_ED4276001 DDK code.
+ * N 5.00
+ * Merge 1.8_ED4302432 DDK code.
+ * N 5.01
+ * 1. Add RK33_DVFS_MODE support.
+ * 2. A spinlock should not be used for rk33_dvfs_set_clock,
+ * since it will sleep if the mutex can't be acquired.
+ * N 5.02
+ * 1. Merge 1.8_Beta_4490825 DDK code.
+ * 2. Add new support for kernel 4.4.
+ * 3. Close RK_TF_VERSION.
+ * 4. Remove the dependency on rockchip_ion_dev.
+ * 5. Fix the frequency display bug on kernel 4.4.
+ * 6. Fix gpu dvfs bug.
+ * N 5.03
+ * 1. Enable PVR_DVFS for the devfreq framework.
+ * 2. Remove some unneeded code for devfreq.
+ * N 5.04
+ * Merge 1.8_ED4610191 DDK code.
+ * N 5.05
+ * 1. If the frequency is unchanged but the voltage has changed, still set the new voltage.
+ * 2. Only give a warning when initializing the simple power model fails.
+ */
+
+#define PVR_STR(X) #X
+#define PVR_STR2(X) PVR_STR(X)
+
+#define PVRVERSION_MAJ 1
+#define PVRVERSION_MIN 8
+
+#define PVRVERSION_FAMILY "rogueddk"
+#define PVRVERSION_BRANCHNAME "1.8.RTM"
+#define PVRVERSION_BUILD 4610191
+#define PVRVERSION_BSCONTROL "Rogue_DDK_Android"
+
+#define PVRVERSION_STRING "Rogue_DDK_Android rogueddk 1.8.RTM@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING_SHORT "1.8@" PVR_STR2(PVRVERSION_BUILD) " (1.8.RTM)"
+
+#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI 461
+#define PVRVERSION_BUILD_LO 191
+#define PVRVERSION_STRING_NUMERIC PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
+
+#define PVRVERSION_PACK(MAJ,MIN) ((((MAJ)&0xFFFF) << 16) | (((MIN)&0xFFFF) << 0))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16) & 0xFFFF)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0) & 0xFFFF)
+
+/* chenli: define the Rockchip version */
+#define RKVERSION "KM N 5.05"
+#endif /* _PVRVERSION_H_ */
--- /dev/null
+config POWERVR_ROGUE_N
+ tristate "PowerVR Rogue"
+ default n
+ depends on DRM
+ help
+ Driver for PowerVR Rogue graphics hardware.
+
+ Say Y here if your SoC contains a PowerVR Rogue GPU. For more
+ information, see <http://www.imgtec.com/powervr/>.
+
+config POWERVR_ROGUE_PDUMP
+ bool "Parameter dumping"
+ default n
+ depends on POWERVR_ROGUE_N
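+ help
+ Enable parameter dumping (PDump) support, which records the
+ parameters and commands sent to the hardware so they can be
+ replayed offline for debugging.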
+
+config POWERVR_ROGUE_RESOURCE_INFO
+ bool "Resource info"
+ default n
+ depends on POWERVR_ROGUE_N
+ help
+ Annotate device memory allocations with human-readable names.
+ Requires a compatible userspace driver.
+
+config POWERVR_ROGUE_DEVICEMEM_HISTORY
+ bool "Device memory history"
+ default n
+ depends on POWERVR_ROGUE_N
+ help
+ Provides a debugfs file containing a list of recent memory
+ allocations for debugging purposes. Requires a compatible
+ userspace driver.
--- /dev/null
+ccflags-y += -include $(srctree)/drivers/staging/imgtec/config_kernel.h \
+ -I$(srctree)/drivers/staging/imgtec \
+ -I$(srctree)/$(src) \
+ -I$(srctree)/$(src)/hwdefs \
+ -I$(srctree)/$(src)/hwdefs/km \
+ -I$(srctree)/$(src)/rk3368
+ccflags-$(CONFIG_X86) += -mno-soft-float
+
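+# The :m=y substitution treats CONFIG_POWERVR_APOLLO=m like =y, so the Apollo
+# system layer is built into pvrsrvkm whether that option is built-in or
+# modular.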
+ccflags-$(CONFIG_POWERVR_APOLLO:m=y) += -I$(srctree)/drivers/staging/imgtec/apollo
+pvrsrvkm-$(CONFIG_POWERVR_APOLLO:m=y) += ../apollo/sysconfig.o
+
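+# Add every generated bridge directory to the header search path.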
+tmp := $(addprefix -I,$(wildcard $(srctree)/$(src)/generated/*))
+ccflags-y += $(tmp)
+
+obj-$(CONFIG_POWERVR_ROGUE_N) += pvrsrvkm.o
+
+pvrsrvkm-y += rk3368/rk_init_v2.o
+pvrsrvkm-y += rk3368/sysconfig.o
+pvrsrvkm-y += interrupt_support.o
+pvrsrvkm-y += pvr_dvfs_device.o
+
+pvrsrvkm-y += allocmem.o
+pvrsrvkm-y += cache_km.o
+pvrsrvkm-y += connection_server.o
+pvrsrvkm-y += debugmisc_server.o
+pvrsrvkm-y += devicemem.o
+pvrsrvkm-y += devicemem_heapcfg.o
+pvrsrvkm-y += osmmap_stub.o
+pvrsrvkm-y += devicemem_server.o
+pvrsrvkm-y += devicemem_utils.o
+pvrsrvkm-y += event.o
+pvrsrvkm-y += handle.o
+pvrsrvkm-y += handle_idr.o
+pvrsrvkm-y += hash.o
+pvrsrvkm-y += htbserver.o
+pvrsrvkm-y += htbuffer.o
+pvrsrvkm-y += km_apphint.o
+pvrsrvkm-y += lists.o
+pvrsrvkm-y += mem_utils.o
+pvrsrvkm-y += mmu_common.o
+pvrsrvkm-y += module_common.o
+pvrsrvkm-y += osconnection_server.o
+pvrsrvkm-y += osfunc.o
+pvrsrvkm-y += physheap.o
+pvrsrvkm-y += physmem.o
+pvrsrvkm-y += physmem_lma.o
+pvrsrvkm-y += physmem_osmem_linux.o
+pvrsrvkm-y += physmem_tdsecbuf.o
+pvrsrvkm-y += pmr.o
+pvrsrvkm-y += pmr_os.o
+pvrsrvkm-y += power.o
+pvrsrvkm-y += process_stats.o
+pvrsrvkm-y += pvr_bridge_k.o
+pvrsrvkm-y += pvr_debug.o
+pvrsrvkm-y += pvr_debugfs.o
+pvrsrvkm-y += pvr_notifier.o
+pvrsrvkm-y += pvrsrv.o
+pvrsrvkm-y += ra.o
+pvrsrvkm-y += rgx_compat_bvnc.o
+pvrsrvkm-y += rgxbreakpoint.o
+pvrsrvkm-y += rgxccb.o
+pvrsrvkm-y += rgxcompute.o
+pvrsrvkm-y += rgxdebug.o
+pvrsrvkm-y += rgxfwutils.o
+pvrsrvkm-y += rgxhwperf.o
+pvrsrvkm-y += rgxinit.o
+pvrsrvkm-y += rgxkicksync.o
+pvrsrvkm-y += rgxlayer_km_impl.o
+pvrsrvkm-y += rgxmem.o
+pvrsrvkm-y += rgxmipsmmuinit.o
+pvrsrvkm-y += rgxmmuinit.o
+pvrsrvkm-y += rgxpower.o
+pvrsrvkm-y += rgxray.o
+pvrsrvkm-y += rgxregconfig.o
+pvrsrvkm-y += rgxsignals.o
+pvrsrvkm-y += rgxstartstop.o
+pvrsrvkm-y += rgxta3d.o
+pvrsrvkm-y += rgxtdmtransfer.o
+pvrsrvkm-y += rgxtimecorr.o
+pvrsrvkm-y += rgxtimerquery.o
+pvrsrvkm-y += rgxtransfer.o
+pvrsrvkm-y += rgxutils.o
+pvrsrvkm-y += srvcore.o
+pvrsrvkm-y += sync.o
+pvrsrvkm-y += sync_checkpoint.o
+pvrsrvkm-y += sync_server.o
+pvrsrvkm-y += tlclient.o
+pvrsrvkm-y += tlintern.o
+pvrsrvkm-y += tlserver.o
+pvrsrvkm-y += tlstream.o
+pvrsrvkm-y += uniq_key_splay_tree.o
+
+# Kernel srvinit
+pvrsrvkm-y += rgx_compat_bvnc.o
+pvrsrvkm-y += rgx_hwperf_table.o
+pvrsrvkm-y += rgxfwimageutils.o
+pvrsrvkm-y += rgxfwload.o
+pvrsrvkm-y += rgxlayer_impl.o
+pvrsrvkm-y += rgxsrvinit.o
+pvrsrvkm-y += rgxsrvinit_script.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/dpdump_bridge/client_pdump_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/dpdumpctrl_bridge/client_pdumpctrl_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/drgxpdump_bridge/client_rgxpdump_bridge.o
+pvrsrvkm-y += generated/rgxinit_bridge/client_rgxinit_direct_bridge.o
+
+pvrsrvkm-y += ../pvr_platform_drv.o
+
+pvrsrvkm-$(CONFIG_DRM) += ../pvr_drm.o
+
+pvrsrvkm-$(CONFIG_DMA_SHARED_BUFFER) += physmem_dmabuf.o
+
+# RI strings
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_RESOURCE_INFO) += ri_server.o
+ccflags-$(CONFIG_POWERVR_ROGUE_RESOURCE_INFO) += -DPVR_RI_DEBUG=1
+
+# Device memory history
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_DEVICEMEM_HISTORY) += devicemem_history_server.o
+ccflags-$(CONFIG_POWERVR_ROGUE_DEVICEMEM_HISTORY) += -DSUPPORT_PAGE_FAULT_DEBUG=1
+
+# GPU tracing in systrace
+#pvrsrvkm-$(CONFIG_FTRACE) += pvr_gputrace.o
+
+# Event tracing
+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o
+
+# arch-specific wrapper functions
+pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o
+pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o
+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o
+pvrsrvkm-$(CONFIG_METAG) += osfunc_metag.o
+pvrsrvkm-$(CONFIG_MIPS) += osfunc_mips.o
+
+# PDump
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += dbgdriv.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += dbgdriv_handle.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += devicemem_pdump.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += devicememx_pdump.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += hostfunc.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += ioctl.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += main.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += pdump.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += pdump_common.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += pdump_mmu.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += pdump_physmem.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += rgxpdump.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += srvinit_pdump.o
+ccflags-$(CONFIG_POWERVR_ROGUE_PDUMP) += -DPDUMP=1
+
+# Android native synchronisation
+pvrsrvkm-$(CONFIG_SYNC) += ../pvr_sync.o
+
+# Generated bridge code
+pvrsrvkm-y += generated/mm_bridge/server_mm_bridge.o
+pvrsrvkm-y += generated/mm_bridge/client_mm_direct_bridge.o
+pvrsrvkm-y += generated/cmm_bridge/server_cmm_bridge.o
+pvrsrvkm-y += generated/rgxtq_bridge/server_rgxtq_bridge.o
+pvrsrvkm-y += generated/rgxta3d_bridge/server_rgxta3d_bridge.o
+pvrsrvkm-y += generated/rgxcmp_bridge/server_rgxcmp_bridge.o
+pvrsrvkm-y += generated/rgxsignals_bridge/server_rgxsignals_bridge.o
+pvrsrvkm-y += generated/srvcore_bridge/server_srvcore_bridge.o
+pvrsrvkm-y += generated/sync_bridge/server_sync_bridge.o
+pvrsrvkm-y += generated/sync_bridge/client_sync_direct_bridge.o
+pvrsrvkm-y += generated/cache_bridge/client_cache_direct_bridge.o
+pvrsrvkm-y += generated/cache_bridge/server_cache_bridge.o
+pvrsrvkm-y += generated/breakpoint_bridge/server_breakpoint_bridge.o
+pvrsrvkm-y += generated/debugmisc_bridge/server_debugmisc_bridge.o
+pvrsrvkm-y += generated/pvrtl_bridge/server_pvrtl_bridge.o
+pvrsrvkm-y += generated/pvrtl_bridge/client_pvrtl_direct_bridge.o
+pvrsrvkm-y += generated/rgxhwperf_bridge/server_rgxhwperf_bridge.o
+pvrsrvkm-y += generated/regconfig_bridge/server_regconfig_bridge.o
+pvrsrvkm-y += generated/timerquery_bridge/server_timerquery_bridge.o
+pvrsrvkm-y += generated/htbuffer_bridge/server_htbuffer_bridge.o
+pvrsrvkm-y += generated/htbuffer_bridge/client_htbuffer_direct_bridge.o
+pvrsrvkm-y += generated/rgxkicksync_bridge/server_rgxkicksync_bridge.o
+pvrsrvkm-y += generated/rgxray_bridge/server_rgxray_bridge.o
+pvrsrvkm-y += generated/rgxtq2_bridge/server_rgxtq2_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_RESOURCE_INFO) += generated/ri_bridge/server_ri_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_RESOURCE_INFO) += generated/ri_bridge/client_ri_direct_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_DEVICEMEM_HISTORY) += generated/devicememhistory_bridge/server_devicememhistory_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_DEVICEMEM_HISTORY) += generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/pdumpmm_bridge/server_pdumpmm_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/pdumpmm_bridge/client_pdumpmm_direct_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/pdump_bridge/server_pdump_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/pdumpctrl_bridge/server_pdumpctrl_bridge.o
+pvrsrvkm-$(CONFIG_POWERVR_ROGUE_PDUMP) += generated/rgxpdump_bridge/server_rgxpdump_bridge.o
+pvrsrvkm-$(CONFIG_DMA_SHARED_BUFFER) += generated/dmabuf_bridge/server_dmabuf_bridge.o
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Host memory management implementation for Linux
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "osfunc.h"
+
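+/* Padding appended to kmalloc allocations so the allocating process ID can
+ * be stored at the end of the block for the process statistics code.
+ */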
+#if defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define ALLOCMEM_MEMSTATS_PADDING 0
+#else
+#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+#endif
+
+/* Ensure the poison value is not divisible by 4. It is used to poison freed
+ * memory so that use-after-free bugs in kernel-side code trip up.
+ */
+#define OS_MEM_POISON_VALUE (0x6b)
+
+static inline void _pvr_vfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+ /* The size is harder to determine for vmalloc and, since vmalloc
+ * allocates a whole number of pages, poison only the minimum size
+ * known to have been allocated.
+ */
+ OSCachedMemSet((void*)pvAddr, OS_MEM_POISON_VALUE, PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD);
+#endif
+ vfree(pvAddr);
+}
+
+static inline void _pvr_kfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+ /* Poison whole memory block */
+ OSCachedMemSet((void*)pvAddr, OS_MEM_POISON_VALUE, ksize(pvAddr));
+#endif
+ kfree(pvAddr);
+}
+
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+IMG_INTERNAL void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
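+ /* Use vmalloc for allocations above the kmalloc threshold; fall back
+ * to kmalloc for small sizes or if vmalloc fails.
+ */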
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+IMG_INTERNAL void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMem)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if (!is_vmalloc_addr(pvMem))
+ {
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+ _pvr_vfree(pvMem);
+ }
+ }
+}
+#else
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) && defined(PVRSRV_ENABLE_MEMORY_STATS)
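+/* On full memory-statistics debug builds the allocators also record the
+ * allocating file and line, so they take extra parameters here.
+ */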
+IMG_INTERNAL void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+
+ if (!is_vmalloc_addr(pvRet))
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ }
+ return pvRet;
+}
+
+IMG_INTERNAL void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+ if (!is_vmalloc_addr(pvRet))
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ }
+ return pvRet;
+}
+#else
+IMG_INTERNAL void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ /* Allocate an additional 4 bytes to store the PID of the allocating process */
+ pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+
+ if (!is_vmalloc_addr(pvRet))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ /* Store the PID in the final additional 4 bytes allocated */
+ IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+ *puiTemp = OSGetCurrentProcessID();
+ }
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL);
+#endif
+#endif
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ (IMG_UINT64)(uintptr_t) pvRet);
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL);
+#endif
+#endif
+ }
+ }
+ return pvRet;
+}
+
+IMG_INTERNAL void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ /* Allocate an additional 4 bytes to store the PID of the allocating process */
+ pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+ if (!is_vmalloc_addr(pvRet))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ /* Store the PID in the final additional 4 bytes allocated */
+ IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+ *puiTemp = OSGetCurrentProcessID();
+ }
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL);
+#endif
+#endif
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ (IMG_UINT64)(uintptr_t) pvRet);
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL);
+#endif
+#endif
+ }
+ }
+ return pvRet;
+}
+#endif
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMem)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if (!is_vmalloc_addr(pvMem))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvMem));
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem);
+#endif
+#endif
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem);
+#endif
+#endif
+ _pvr_vfree(pvMem);
+ }
+ }
+}
+#endif
+
+
+IMG_INTERNAL void *OSAllocMemNoStats(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+IMG_INTERNAL void *OSAllocZMemNoStats(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMemNoStats)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if (!is_vmalloc_addr(pvMem))
+ {
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+ _pvr_vfree(pvMem);
+ }
+ }
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File allocmem.h
+@Title memory allocation header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory-Allocation API definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __ALLOCMEM_H__
+#define __ALLOCMEM_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS)
+/**************************************************************************/ /*!
+@Function OSAllocMem
+@Description Allocates CPU memory. Contents are uninitialized.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMem(IMG_UINT32 ui32Size);
+/**************************************************************************/ /*!
+@Function OSAllocZMem
+@Description Allocates CPU memory and initializes the contents to zero.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMem(IMG_UINT32 ui32Size);
+#else
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#define OSAllocMem(_size) \
+ _OSAllocMem ((_size), (__FILE__), (__LINE__))
+#define OSAllocZMem(_size) \
+ _OSAllocZMem ((_size), (__FILE__), (__LINE__))
+#endif
+
+/**************************************************************************/ /*!
+@Function OSAllocMemNoStats
+@Description Allocates CPU memory. Contents are uninitialized.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+ The allocated memory is not accounted for by process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSAllocMem() and OSAllocMemNoStats() equate to
+ the same operation.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function OSAllocZMemNoStats
+@Description Allocates CPU memory and initializes the contents to zero.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+ The allocated memory is not accounted for by process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSAllocZMem() and OSAllocZMemNoStats() equate to
+ the same operation.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function OSFreeMem
+@Description Frees previously allocated CPU memory.
+@Input pvCpuVAddr Pointer to the memory to be freed.
+@Return None.
+ */ /**************************************************************************/
+void OSFreeMem(void *pvCpuVAddr);
+
+/**************************************************************************/ /*!
+@Function OSFreeMemNoStats
+@Description Frees previously allocated CPU memory.
+ The freed memory does not update the figures in process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSFreeMem() and OSFreeMemNoStats() equate to the
+ same operation.
+@Input pvCpuVAddr Pointer to the memory to be freed.
+@Return None.
+ */ /**************************************************************************/
+void OSFreeMemNoStats(void *pvCpuVAddr);
+
+/*
+ * These macros allow us to catch double-free bugs on DEBUG builds and
+ * prevent crashes on RELEASE builds.
+ */
+
+#if defined(DEBUG)
+#define double_free_sentinel (void*) &OSFreeMem
+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp)
+#else
+#define double_free_sentinel NULL
+#define ALLOCMEM_ASSERT(exp) do {} while(0)
+#endif
+
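+/* On DEBUG builds the freed pointer is reset to a recognisable non-NULL
+ * sentinel so a second OSFreeMem() on it trips the assert; on release builds
+ * it is reset to NULL so a double free becomes a harmless no-op.
+ */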
+#define OSFreeMem(_ptr) do { \
+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+ (OSFreeMem)(_ptr); \
+ (_ptr) = double_free_sentinel; \
+ MSC_SUPPRESS_4127 \
+ } while (0)
+
+#define OSFreeMemNoStats(_ptr) do { \
+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+ (OSFreeMemNoStats)(_ptr); \
+ (_ptr) = double_free_sentinel; \
+ MSC_SUPPRESS_4127 \
+ } while (0)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __ALLOCMEM_H__ */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File cache_km.c
+@Title CPU data cache management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements server side code for CPU cache maintenance management.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(CONFIG_SW_SYNC)
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#include <linux/sw_sync.h>
+#else
+#include <../drivers/staging/android/sw_sync.h>
+#endif
+#include <linux/file.h>
+#include <linux/fs.h>
+#endif
+#include "pmr.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+/* Top-level file-local build definitions */
+#if defined(DEBUG) && defined(LINUX)
+ #define CACHEOP_DEBUG
+#endif
+
+/* Type of cache maintenance mechanism being used */
+#if (CACHEFLUSH_KM_TYPE == CACHEFLUSH_KM_RANGEBASED_DEFERRED)
+ #define SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED
+ #define SUPPORT_RANGEBASED_CACHEFLUSH
+#elif (CACHEFLUSH_KM_TYPE == CACHEFLUSH_KM_RANGEBASED)
+ #define SUPPORT_RANGEBASED_CACHEFLUSH
+#elif (CACHEFLUSH_KM_TYPE == CACHEFLUSH_KM_GLOBAL)
+ /* Nothing to do here */
+#else
+ #error "Unknown CACHEFLUSH_KM_TYPE"
+#endif
+
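+/* A single queued cache maintenance request: the PMR range and operation to
+ * apply, its sequence number, plus optional completion signalling state (a
+ * sw_sync timeline and an event-object flag).
+ */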
+typedef struct _CACHEOP_WORK_ITEM_
+{
+ DLLIST_NODE sNode;
+ PMR *psPMR;
+ struct file *psTimeline;
+ IMG_UINT32 ui32OpSeqNum;
+ IMG_DEVMEM_SIZE_T uiSize;
+ PVRSRV_CACHE_OP uiCacheOp;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_BOOL bSignalEventObject;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 ui64QueuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bRBF;
+ IMG_BOOL bUMF;
+ IMG_PID pid;
+#if defined(PVR_RI_DEBUG)
+ RGXFWIF_DM eFenceOpType;
+#endif
+#endif
+} CACHEOP_WORK_ITEM;
+
+/* Copy of CPU page & dcache-line size */
+static size_t guiOSPageSize;
+static IMG_UINT32 guiCacheLineSize;
+
+/*
+ System-wide CacheOp sequence numbers
+ - ghCommonCacheOpSeqNum:
+ This common sequence mostly numbers CacheOp requests
+ from UM/KM, but might also number fence checks and
+ completed CacheOps depending on the SUPPORT_XXX configs.
+ - ghCompletedCacheOpSeqNum:
+ This tracks the last CacheOp request that was executed
+ in all SUPPORT_XXX configurations and is used
+ exclusively for fence checks.
+*/
+static ATOMIC_T ghCommonCacheOpSeqNum;
+static ATOMIC_T ghCompletedCacheOpSeqNum;
+
+#if defined(CACHEOP_DEBUG)
+#define CACHEOP_MAX_STATS_ITEMS 128
+#define INCR_WRAP(x) ((x+1) >= CACHEOP_MAX_STATS_ITEMS ? 0 : (x+1))
+#define DECR_WRAP(x) ((x-1) < 0 ? (CACHEOP_MAX_STATS_ITEMS-1) : (x-1))
+#if defined(PVR_RI_DEBUG)
+/* Refer to CacheOpStatExecLogHeader() for header item names */
+#define CACHEOP_RI_PRINTF_HEADER "%-10s %-10s %-5s %-8s %-16s %-10s %-10s %-18s %-12s"
+#define CACHEOP_RI_PRINTF_FENCE "%-10s %-10s %-5s %-8d %-16s %-10s %-10s %-18llu 0x%-10x\n"
+#define CACHEOP_RI_PRINTF "%-10s %-10s %-5s %-8d 0x%-14llx 0x%-8llx 0x%-8llx %-18llu 0x%-10x\n"
+#else
+#define CACHEOP_PRINTF_HEADER "%-10s %-10s %-5s %-10s %-10s %-18s %-12s"
+#define CACHEOP_PRINTF_FENCE "%-10s %-10s %-5s %-10s %-10s %-18llu 0x%-10x\n"
+#define CACHEOP_PRINTF "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu 0x%-10x\n"
+#endif
+
+/* Divide a number by 10 using shifts only */
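+/* The shift-add sequence below approximates q = n * 4/5 (the series
+ * (1/2 + 1/4) * (1 + 1/16 + 1/256 + ...)), shifts right by 3 to get an
+ * approximation of n/10, then uses the remainder to correct any off-by-one.
+ */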
+static INLINE IMG_UINT64 DivBy10(IMG_UINT64 uiNum)
+{
+ IMG_UINT64 uiQuot;
+ IMG_UINT64 uiRem;
+
+ uiQuot = (uiNum >> 1) + (uiNum >> 2);
+ uiQuot = uiQuot + (uiQuot >> 4);
+ uiQuot = uiQuot + (uiQuot >> 8);
+ uiQuot = uiQuot + (uiQuot >> 16);
+ uiQuot = uiQuot >> 3;
+ uiRem = uiNum - (((uiQuot << 2) + uiQuot) << 1);
+
+ return uiQuot + (uiRem > 9);
+}
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+typedef struct _CACHEOP_STAT_STALL_ITEM_
+{
+ IMG_UINT32 ui32OpSeqNum;
+ IMG_UINT32 ui32RetryCount;
+ IMG_UINT64 ui64QueuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+} CACHEOP_STAT_STALL_ITEM;
+
+/* These are used in an atomic way so they will never
+ hold values outside of the valid range */
+static IMG_INT32 gi32CacheOpStatStallWriteIdx;
+static IMG_HANDLE ghCacheOpStatStallLock;
+static void *pvCacheOpStatStallEntry;
+
+static CACHEOP_STAT_STALL_ITEM gasCacheOpStatStalled[CACHEOP_MAX_STATS_ITEMS];
+
+static INLINE void CacheOpStatStallLogHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ "%-10s %-12s %-10s",
+ "SeqNo",
+ "Time (ns)",
+ "RetryCount");
+}
+
+static INLINE void CacheOpStatStallLogWrite(IMG_UINT32 ui32FenceOpSeqNum,
+ IMG_UINT64 ui64QueuedTime,
+ IMG_UINT64 ui64ExecuteTime,
+ IMG_UINT32 ui32RetryCount)
+{
+ IMG_INT32 i32WriteOffset = gi32CacheOpStatStallWriteIdx;
+ gi32CacheOpStatStallWriteIdx = INCR_WRAP(gi32CacheOpStatStallWriteIdx);
+ gasCacheOpStatStalled[i32WriteOffset].ui32RetryCount = ui32RetryCount;
+ gasCacheOpStatStalled[i32WriteOffset].ui32OpSeqNum = ui32FenceOpSeqNum;
+ gasCacheOpStatStalled[i32WriteOffset].ui64QueuedTime = ui64QueuedTime;
+ gasCacheOpStatStalled[i32WriteOffset].ui64ExecuteTime = ui64ExecuteTime;
+}
+
+static void CacheOpStatStallLogRead(void *pvFilePtr, void *pvData,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_INT32 i32ReadOffset;
+ IMG_INT32 i32WriteOffset;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]={0};
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ CacheOpStatStallLogHeader(szBuffer);
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+ OSLockAcquire(ghCacheOpStatStallLock);
+
+ i32WriteOffset = gi32CacheOpStatStallWriteIdx;
+ for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+ i32ReadOffset != i32WriteOffset;
+ i32ReadOffset = DECR_WRAP(i32ReadOffset))
+ {
+ IMG_UINT64 ui64QueuedTime, ui64ExecuteTime;
+
+ if (gasCacheOpStatStalled[i32ReadOffset].ui32OpSeqNum == 0)
+ {
+ break;
+ }
+
+ /* Convert from nano-seconds to micro-seconds */
+ ui64ExecuteTime = gasCacheOpStatStalled[i32ReadOffset].ui64ExecuteTime;
+ ui64QueuedTime = gasCacheOpStatStalled[i32ReadOffset].ui64QueuedTime;
+ ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+ ui64QueuedTime = DivBy10(DivBy10(DivBy10(ui64QueuedTime)));
+
+ pfnOSStatsPrintf(pvFilePtr,
+ "%-10x 0x%-10llx %-10x\n",
+ gasCacheOpStatStalled[i32ReadOffset].ui32OpSeqNum,
+ ui64QueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64QueuedTime :
+ ui64QueuedTime - ui64ExecuteTime,
+ gasCacheOpStatStalled[i32ReadOffset].ui32RetryCount);
+ }
+
+ OSLockRelease(ghCacheOpStatStallLock);
+}
+#endif
+
+typedef struct _CACHEOP_STAT_EXEC_ITEM_
+{
+ IMG_UINT32 ui32OpSeqNum;
+ PVRSRV_CACHE_OP uiCacheOp;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT64 ui64QueuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bHasTimeline;
+ IMG_BOOL bIsFence;
+ IMG_BOOL bRBF;
+ IMG_BOOL bUMF;
+#if defined(PVR_RI_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr;
+ RGXFWIF_DM eFenceOpType;
+ IMG_PID pid;
+#endif
+} CACHEOP_STAT_EXEC_ITEM;
+
+/* These are used in an atomic way so they will never
+ hold values outside of the valid range */
+static IMG_INT32 gi32CacheOpStatExecWriteIdx;
+static IMG_HANDLE ghCacheOpStatExecLock;
+static void *pvCacheOpStatExecEntry;
+
+static CACHEOP_STAT_EXEC_ITEM gasCacheOpStatExecuted[CACHEOP_MAX_STATS_ITEMS];
+
+static INLINE void CacheOpStatExecLogHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_HEADER,
+#else
+ CACHEOP_PRINTF_HEADER,
+#endif
+ "CacheOp",
+ "Type",
+ "Mode",
+#if defined(PVR_RI_DEBUG)
+ "Pid",
+ "DevVAddr",
+#endif
+ "Offset",
+ "Size",
+ "Time (us)",
+ "SeqNo");
+}
+
+static INLINE void CacheOpStatExecLogWrite(DLLIST_NODE *psNode)
+{
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_UINT64 ui64QueuedTime;
+ IMG_INT32 i32WriteOffset;
+
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psNode, CACHEOP_WORK_ITEM, sNode);
+ if (psCacheOpWorkItem->ui32OpSeqNum == 0)
+ {
+ /* A zero sequence number breaks the read-out logic,
+ * so such items are not recorded.
+ */
+ return;
+ }
+
+ i32WriteOffset = gi32CacheOpStatExecWriteIdx;
+ gi32CacheOpStatExecWriteIdx = INCR_WRAP(gi32CacheOpStatExecWriteIdx);
+
+ gasCacheOpStatExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
+ gasCacheOpStatExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset;
+ gasCacheOpStatExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
+ gasCacheOpStatExecuted[i32WriteOffset].ui32OpSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+ gasCacheOpStatExecuted[i32WriteOffset].ui64QueuedTime = psCacheOpWorkItem->ui64QueuedTime;
+ gasCacheOpStatExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime;
+ gasCacheOpStatExecuted[i32WriteOffset].bHasTimeline = psCacheOpWorkItem->psPMR == NULL;
+ gasCacheOpStatExecuted[i32WriteOffset].bRBF = psCacheOpWorkItem->bRBF;
+ gasCacheOpStatExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF;
+ gasCacheOpStatExecuted[i32WriteOffset].bIsFence =
+ psCacheOpWorkItem->psPMR == NULL && psCacheOpWorkItem->psTimeline == NULL;
+#if defined(PVR_RI_DEBUG)
+ gasCacheOpStatExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
+ PVR_ASSERT(gasCacheOpStatExecuted[i32WriteOffset].pid);
+
+ if (psCacheOpWorkItem->psPMR != NULL)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Get more detailed information from the RI manager about the
+ * sub-allocations within this PMR for the process that requested
+ * the CacheOp.
+ */
+ eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
+ gasCacheOpStatExecuted[i32WriteOffset].pid,
+ gasCacheOpStatExecuted[i32WriteOffset].uiOffset,
+ &gasCacheOpStatExecuted[i32WriteOffset].sDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ return;
+ }
+ }
+
+ if (gasCacheOpStatExecuted[i32WriteOffset].bIsFence)
+ {
+ gasCacheOpStatExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType;
+ }
+#endif
+
+ ui64ExecuteTime = gasCacheOpStatExecuted[i32WriteOffset].ui64ExecuteTime;
+ ui64QueuedTime = gasCacheOpStatExecuted[i32WriteOffset].ui64QueuedTime;
+
+ /* This operation queues this CacheOp in per-PID process statistics database */
+ PVRSRVStatsUpdateCacheOpStats(gasCacheOpStatExecuted[i32WriteOffset].uiCacheOp,
+ gasCacheOpStatExecuted[i32WriteOffset].ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG)
+ gasCacheOpStatExecuted[i32WriteOffset].sDevVAddr,
+ gasCacheOpStatExecuted[i32WriteOffset].eFenceOpType,
+#endif
+ gasCacheOpStatExecuted[i32WriteOffset].uiOffset,
+ gasCacheOpStatExecuted[i32WriteOffset].uiSize,
+ ui64QueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64QueuedTime:
+ ui64QueuedTime - ui64ExecuteTime,
+ gasCacheOpStatExecuted[i32WriteOffset].bRBF,
+ gasCacheOpStatExecuted[i32WriteOffset].bUMF,
+ gasCacheOpStatExecuted[i32WriteOffset].bIsFence,
+ gasCacheOpStatExecuted[i32WriteOffset].bHasTimeline,
+ psCacheOpWorkItem->pid);
+}
+
+static void CacheOpStatExecLogRead(void *pvFilePtr, void *pvData,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_INT32 i32ReadOffset;
+ IMG_INT32 i32WriteOffset;
+ IMG_CHAR *pszCacheOpType;
+ IMG_CHAR *pszFlushSource;
+ IMG_CHAR *pszFlushype;
+ IMG_UINT64 ui64QueuedTime, ui64ExecuteTime;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]={0};
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ CacheOpStatExecLogHeader(szBuffer);
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+ OSLockAcquire(ghCacheOpStatExecLock);
+
+ i32WriteOffset = gi32CacheOpStatExecWriteIdx;
+ for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+ i32ReadOffset != i32WriteOffset;
+ i32ReadOffset = DECR_WRAP(i32ReadOffset))
+ {
+ if (gasCacheOpStatExecuted[i32ReadOffset].ui32OpSeqNum == 0)
+ {
+ break;
+ }
+
+ /* Convert from nano-seconds to micro-seconds */
+ ui64ExecuteTime = gasCacheOpStatExecuted[i32ReadOffset].ui64ExecuteTime;
+ ui64QueuedTime = gasCacheOpStatExecuted[i32ReadOffset].ui64QueuedTime;
+ ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+ ui64QueuedTime = DivBy10(DivBy10(DivBy10(ui64QueuedTime)));
+
+ if (gasCacheOpStatExecuted[i32ReadOffset].bIsFence)
+ {
+ IMG_CHAR *pszFenceType = "";
+ pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG)
+ switch (gasCacheOpStatExecuted[i32ReadOffset].eFenceOpType)
+ {
+ case RGXFWIF_DM_GP:
+ pszFenceType = "GP";
+ break;
+
+ case RGXFWIF_DM_TDM:
+ /* Also case RGXFWIF_DM_2D: */
+ pszFenceType = "TDM/2D";
+ break;
+
+ case RGXFWIF_DM_TA:
+ pszFenceType = "TA";
+ break;
+
+ case RGXFWIF_DM_3D:
+ pszFenceType = "3D";
+ break;
+
+ case RGXFWIF_DM_CDM:
+ pszFenceType = "CDM";
+ break;
+
+ case RGXFWIF_DM_RTU:
+ pszFenceType = "RTU";
+ break;
+
+ case RGXFWIF_DM_SHG:
+ pszFenceType = "SHG";
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+#endif
+ pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_FENCE,
+#else
+ CACHEOP_PRINTF_FENCE,
+#endif
+ pszCacheOpType,
+ pszFenceType,
+ "",
+#if defined(PVR_RI_DEBUG)
+ gasCacheOpStatExecuted[i32ReadOffset].pid,
+ "",
+#endif
+ "",
+ "",
+ ui64QueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64QueuedTime :
+ ui64QueuedTime - ui64ExecuteTime,
+ gasCacheOpStatExecuted[i32ReadOffset].ui32OpSeqNum);
+ }
+ else if (gasCacheOpStatExecuted[i32ReadOffset].bHasTimeline)
+ {
+ pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_FENCE,
+#else
+ CACHEOP_PRINTF_FENCE,
+#endif
+ "Timeline",
+ "",
+ "",
+#if defined(PVR_RI_DEBUG)
+ gasCacheOpStatExecuted[i32ReadOffset].pid,
+ "",
+#endif
+ "",
+ "",
+ ui64QueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64QueuedTime :
+ ui64QueuedTime - ui64ExecuteTime,
+ gasCacheOpStatExecuted[i32ReadOffset].ui32OpSeqNum);
+ }
+ else
+ {
+ if (gasCacheOpStatExecuted[i32ReadOffset].bRBF)
+ {
+ IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+ ui64NumOfPages = gasCacheOpStatExecuted[i32ReadOffset].uiSize >> OSGetPageShift();
+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pszFlushype = "RBF.Fast";
+ }
+ else
+ {
+ pszFlushype = "RBF.Slow";
+ }
+ }
+ else
+ {
+ pszFlushype = "GF";
+ }
+
+ if (gasCacheOpStatExecuted[i32ReadOffset].bUMF)
+ {
+ pszFlushSource = "UM";
+ }
+ else
+ {
+ pszFlushSource = "KM";
+ }
+
+ switch (gasCacheOpStatExecuted[i32ReadOffset].uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_NONE:
+ pszCacheOpType = "None";
+ break;
+ case PVRSRV_CACHE_OP_CLEAN:
+ pszCacheOpType = "Clean";
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ pszCacheOpType = "Invalidate";
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ pszCacheOpType = "Flush";
+ break;
+ default:
+ pszCacheOpType = "Unknown";
+ break;
+ }
+
+ pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF,
+#else
+ CACHEOP_PRINTF,
+#endif
+ pszCacheOpType,
+ pszFlushType,
+ pszFlushSource,
+#if defined(PVR_RI_DEBUG)
+ gasCacheOpStatExecuted[i32ReadOffset].pid,
+ gasCacheOpStatExecuted[i32ReadOffset].sDevVAddr.uiAddr,
+#endif
+ gasCacheOpStatExecuted[i32ReadOffset].uiOffset,
+ gasCacheOpStatExecuted[i32ReadOffset].uiSize,
+ ui64QueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64QueuedTime :
+ ui64QueuedTime - ui64ExecuteTime,
+ gasCacheOpStatExecuted[i32ReadOffset].ui32OpSeqNum);
+ }
+ }
+
+ OSLockRelease(ghCacheOpStatExecLock);
+}
+
+static PVRSRV_ERROR CacheOpStatExecLog(void *pvData)
+{
+ DLLIST_NODE *psListNode = (DLLIST_NODE *) pvData;
+ DLLIST_NODE *psCurrentNode, *psNextNode;
+
+ OSLockAcquire(ghCacheOpStatExecLock);
+
+ CacheOpStatExecLogWrite(psListNode);
+ dllist_foreach_node (psListNode, psCurrentNode, psNextNode)
+ {
+ CacheOpStatExecLogWrite(psCurrentNode);
+ }
+
+ OSLockRelease(ghCacheOpStatExecLock);
+
+ return PVRSRV_OK;
+}
+#endif /* defined(CACHEOP_DEBUG) */
+
+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING
+#define CACHEOP_SEQ_MIDPOINT (IMG_UINT32) 0x7FFFFFFF
+#define CACHEOP_DPFL PVR_DBG_MESSAGE
+
+/* Perform the requested CacheOp on the CPU data cache for successive
+ cache-lines' worth of bytes, up to the page or in-page cache-line boundary */
+static INLINE void CacheOpCPURangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_CACHE_OP uiCacheOp,
+ IMG_BYTE *pbCpuVirtAddr,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
+{
+ IMG_BYTE *pbCpuVirtAddrEnd;
+ IMG_BYTE *pbCpuVirtAddrStart;
+ IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+ IMG_CPU_PHYADDR sCpuPhyAddrStart;
+ IMG_DEVMEM_SIZE_T uiRelFlushSize;
+ IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
+ IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
+
+ /* These quantities allow us to perform cache operations at
+ cache-line granularity, ensuring we do no more work
+ than is necessary */
+ PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
+ uiRelFlushSize = (IMG_DEVMEM_SIZE_T)guiOSPageSize;
+ uiRelFlushOffset = 0;
+
+ if (uiCLAlignedStartOffset > uiPgAlignedOffset)
+ {
+ /* Zero unless initially starting at an in-page offset */
+ uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
+ uiRelFlushSize -= uiRelFlushOffset;
+ }
+
+ /* uiRelFlushSize is guiOSPageSize unless the current outstanding
+ CacheOp size is smaller. The first case handles an in-page CacheOp
+ range; the second handles a multiple-page CacheOp range whose
+ final portion is smaller than guiOSPageSize */
+ uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)guiOSPageSize;
+ if (uiNextPgAlignedOffset < uiPgAlignedOffset)
+ {
+ /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
+ by implication of this wrap-round; this only happens when
+ uiPgAlignedOffset is the last page aligned offset */
+ uiRelFlushSize = uiRelFlushOffset ?
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+ uiCLAlignedEndOffset - uiPgAlignedOffset;
+ }
+ else
+ {
+ if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
+ {
+ uiRelFlushSize = uiRelFlushOffset ?
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+ uiCLAlignedEndOffset - uiPgAlignedOffset;
+ }
+ }
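+ /* For illustration: with a 4 KB page and 64-byte cache lines,
+ uiPgAlignedOffset = 0x1000, uiCLAlignedStartOffset = 0x1040 and
+ uiCLAlignedEndOffset = 0x10C0 give uiRelFlushOffset = 0x40 and, because
+ the next page-aligned offset (0x2000) exceeds the cache-line aligned end,
+ uiRelFlushSize = 0x80; only the two cache lines covering the request are
+ maintained rather than the whole page */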
+
+ /* It is more efficient to request one cache maintenance operation for the
+ full relative range than several cache-line-aligned sub-ranges */
+ pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
+ pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
+ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
+ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
+
+ switch (uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ OSCleanCPUCacheRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ OSInvalidateCPUCacheRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ OSFlushCPUCacheRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
+ __FUNCTION__, uiCacheOp));
+ PVR_ASSERT(0);
+ break;
+ }
+}
+
+/* This function assumes the PMR is locked */
+static PVRSRV_ERROR CacheOpRangeBased (PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp,
+ IMG_BOOL *bUsedGlobalFlush)
+{
+ IMG_HANDLE hPrivOut;
+ IMG_BOOL bPMRIsSparse;
+ IMG_UINT32 ui32PageIndex;
+ IMG_UINT32 ui32NumOfPages;
+ IMG_DEVMEM_SIZE_T uiOutSize;
+ IMG_DEVMEM_SIZE_T uiPgAlignedSize;
+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset;
+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffsetNext;
+ PVRSRV_CACHE_OP_ADDR_TYPE uiCacheOpAddrType;
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_UINT32 OS_PAGE_SHIFT = (IMG_UINT32) OSGetPageShift();
+ IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr;
+ IMG_BOOL bIsPMRDataRetrieved = IMG_FALSE;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BYTE *pbCpuVirtAddr = NULL;
+ IMG_BOOL *pbValid = abValid;
+
+ if (uiCacheOp == PVRSRV_CACHE_OP_NONE)
+ {
+ PVR_ASSERT(0);
+ return PVRSRV_OK;
+ }
+ else
+ {
+ /* Carry out a full d-cache operation if the size (in bytes) meets the global flush threshold */
+ if (uiSize >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
+ {
+ /* Flush, so we can skip subsequent invalidates */
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ if (eError == PVRSRV_OK)
+ {
+ *bUsedGlobalFlush = IMG_TRUE;
+ return PVRSRV_OK;
+ }
+ }
+ }
+
+ /* Need this for kernel mapping */
+ bPMRIsSparse = PMR_IsSparse(psPMR);
+
+ /* Round the incoming start offset down and the end offset up to the nearest cache-line / page boundary */
+ uiCLAlignedEndOffset = uiOffset + uiSize;
+ uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)guiCacheLineSize);
+ uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)guiCacheLineSize-1));
+
+ uiPgAlignedEndOffset = uiCLAlignedEndOffset;
+ uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)guiOSPageSize);
+ uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)guiOSPageSize-1));
+ uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
+
+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
+ /* For internal debug if cache-line optimised
+ flushing is suspected of causing data corruption */
+ uiCLAlignedStartOffset = uiPgAlignedStartOffset;
+ uiCLAlignedEndOffset = uiPgAlignedEndOffset;
+#endif
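+ /* For illustration: with a 64-byte cache line and a 4 KB page, a request of
+ uiOffset = 0x1050 and uiSize = 0x40 rounds to the cache-line aligned range
+ [0x1040, 0x10C0) and the page-aligned range [0x1000, 0x2000), so a single
+ device page is visited in the loop below */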
+
+ /* Which type of address(es) do we need for this CacheOp */
+ uiCacheOpAddrType = OSCPUCacheOpAddressType(uiCacheOp);
+
+ /* Type of allocation backing the PMR data */
+ ui32NumOfPages = uiPgAlignedSize >> OS_PAGE_SHIFT;
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ /* The pbValid array is allocated first as it is needed in
+ both physical/virtual cache maintenance methods */
+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+ if (pbValid != NULL)
+ {
+ if (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
+ if (psCpuPhyAddr == NULL)
+ {
+ psCpuPhyAddr = asCpuPhyAddr;
+ OSFreeMem(pbValid);
+ pbValid = abValid;
+ }
+ }
+ }
+ else
+ {
+ pbValid = abValid;
+ }
+ }
+
+ /* We always retrieve PMR data in bulk, up-front, if the number of pages is
+ within the PMR_MAX_TRANSLATION_STACK_ALLOC limit; otherwise we check that
+ a dynamic buffer was successfully allocated for requests outside that limit */
+ if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid)
+ {
+ if (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ /* Look-up PMR CpuPhyAddr once, if possible */
+ eError = PMR_CpuPhysAddr(psPMR,
+ OS_PAGE_SHIFT,
+ ui32NumOfPages,
+ uiPgAlignedStartOffset,
+ psCpuPhyAddr,
+ pbValid);
+ if (eError == PVRSRV_OK)
+ {
+ bIsPMRDataRetrieved = IMG_TRUE;
+ }
+ }
+ else
+ {
+ /* Look-up PMR per-page validity once, if possible */
+ eError = PMR_IsOffsetValid(psPMR,
+ OS_PAGE_SHIFT,
+ ui32NumOfPages,
+ uiPgAlignedStartOffset,
+ pbValid);
+ bIsPMRDataRetrieved = eError == PVRSRV_OK ? IMG_TRUE : IMG_FALSE;
+ }
+ }
+
+ /* For each device page, carry out the requested cache maintenance operation */
+ for (uiPgAlignedOffsetNext = uiPgAlignedStartOffset, ui32PageIndex = 0;
+ uiPgAlignedOffsetNext < uiPgAlignedEndOffset;
+ uiPgAlignedOffsetNext += (IMG_DEVMEM_OFFSET_T) guiOSPageSize, ui32PageIndex += 1)
+ {
+ if (bIsPMRDataRetrieved == IMG_FALSE)
+ {
+ /* Never cross a page boundary without looking up the corresponding
+ PMR page physical address and/or page validity, as these
+ were not looked up in bulk up-front */
+ ui32PageIndex = 0;
+ if (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ eError = PMR_CpuPhysAddr(psPMR,
+ OS_PAGE_SHIFT,
+ 1,
+ uiPgAlignedOffsetNext,
+ psCpuPhyAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(0);
+ goto e0;
+ }
+ }
+ else
+ {
+ eError = PMR_IsOffsetValid(psPMR,
+ OS_PAGE_SHIFT,
+ 1,
+ uiPgAlignedOffsetNext,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(0);
+ goto e0;
+ }
+ }
+ }
+
+ /* Skip invalid PMR pages (i.e. sparse) */
+ if (pbValid[ui32PageIndex] == IMG_FALSE)
+ {
+ continue;
+ }
+
+ /* Skip virtual address acquire if CacheOp can be maintained
+ entirely using PMR physical addresses */
+ if (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+ {
+ if (bPMRIsSparse)
+ {
+ eError =
+ PMRAcquireSparseKernelMappingData(psPMR,
+ uiPgAlignedOffsetNext,
+ guiOSPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(0);
+ goto e0;
+ }
+ }
+ else
+ {
+ eError =
+ PMRAcquireKernelMappingData(psPMR,
+ uiPgAlignedOffsetNext,
+ guiOSPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(0);
+ goto e0;
+ }
+ }
+ }
+
+ /* Issue actual cache maintenance for PMR */
+ CacheOpCPURangeBased(PMR_DeviceNode(psPMR),
+ uiCacheOp,
+ pbCpuVirtAddr,
+ (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
+ psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
+ uiPgAlignedOffsetNext,
+ uiCLAlignedStartOffset,
+ uiCLAlignedEndOffset);
+
+ /* Skip virtual address release if CacheOp can be maintained
+ entirely using PMR physical addresses */
+ if (uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+ {
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+
+e0:
+ if (psCpuPhyAddr != asCpuPhyAddr)
+ {
+ OSFreeMem(psCpuPhyAddr);
+ }
+
+ if (pbValid != abValid)
+ {
+ OSFreeMem(pbValid);
+ }
+
+ return eError;
+}
+
+static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32UpdateSeqNum,
+ IMG_UINT32 ui32FenceSeqNum)
+{
+ IMG_UINT32 ui32RebasedUpdateNum;
+ IMG_UINT32 ui32RebasedFenceNum;
+ IMG_UINT32 ui32Rebase;
+
+ if (ui32FenceSeqNum == 0)
+ {
+ return IMG_TRUE;
+ }
+
+ /*
+ The problem statement is how to compare two values
+ on a numerical sequentially incrementing timeline in
+ the presence of wrap around arithmetic semantics using
+ a single ui32 counter & atomic (increment) operations.
+
+ The rationale for the solution here is to rebase the
+ incoming values to the sequence midpoint and perform
+ comparisons there; this allows us to handle overflow
+ or underflow wrap-round using only a single integer.
+
+ NOTE: We assume that the absolute value of the
+ difference between the two incoming values is _not_
+ greater than CACHEOP_SEQ_MIDPOINT. This assumption
+ holds because it is very _unlikely_ that 2 billion
+ CacheOp requests could have been made between a
+ single client's CacheOp request and the corresponding
+ fence check. This code sequence is a hand-optimised
+ (branchless) version of the following:
+
+ x = ui32CompletedOpSeqNum
+ y = ui32FenceOpSeqNum
+
+ if (|x - y| < CACHEOP_SEQ_MIDPOINT)
+ return (x - y) >= 0 ? true : false
+ else
+ return (y - x) >= 0 ? true : false
+ */
+ ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32UpdateSeqNum;
+
+ /* ui32Rebase could be logically positive or negative; either way
+ we perform the additions using unsigned semantics, as 2's
+ complement arithmetic still yields the correct result */
+ ui32RebasedUpdateNum = ui32Rebase + ui32UpdateSeqNum;
+ ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum;
+
+ return (ui32RebasedUpdateNum >= ui32RebasedFenceNum);
+}
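+
+/* For illustration: if the completed counter has wrapped such that
+ ui32UpdateSeqNum = 0x00000005 while ui32FenceSeqNum = 0xFFFFFFFA, then
+ ui32Rebase = 0x7FFFFFFA, ui32RebasedUpdateNum = 0x7FFFFFFF and
+ ui32RebasedFenceNum = 0x7FFFFFF4 (mod 2^32), so the fence correctly passes;
+ swapping the two incoming values gives 0x7FFFFFFF vs 0x8000000A and the
+ fence correctly fails */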
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH)
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+/* Wait up to 8 hours when no deferred CacheOp work is pending;
+ for fence checks, wait 10 ms between retries */
+#define CACHEOP_THREAD_WAIT_TIMEOUT 28800000000ULL
+#define CACHEOP_FENCE_WAIT_TIMEOUT 10000ULL
+
+typedef struct _CACHEOP_CLEANUP_WORK_ITEM_
+{
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupWorkItem;
+ DLLIST_NODE *psListNode;
+} CACHEOP_CLEANUP_WORK_ITEM;
+
+/* These are used to track pending CacheOps */
+static IMG_DEVMEM_SIZE_T guiPendingDevmemSize;
+static IMG_BOOL gbPendingTimeline;
+
+static INLINE PVRSRV_ERROR CacheOpFree(void *pvData)
+{
+ CACHEOP_CLEANUP_WORK_ITEM *psCacheOpCleanupItem = pvData;
+ DLLIST_NODE *psListNode = psCacheOpCleanupItem->psListNode;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+ DLLIST_NODE *psNodeIter;
+
+ while (! dllist_is_empty(psListNode))
+ {
+ psNodeIter = dllist_get_next_node(psListNode);
+ dllist_remove_node(psNodeIter);
+
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psNodeIter, CACHEOP_WORK_ITEM, sNode);
+ if (psCacheOpWorkItem->psPMR)
+ {
+ PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ }
+
+ OSFreeMem(psCacheOpWorkItem);
+ }
+
+ /* Finally, free the pseudo head node, which is itself a valid CacheOp work item */
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psListNode, CACHEOP_WORK_ITEM, sNode);
+ if (psCacheOpWorkItem->psPMR)
+ {
+ PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ }
+
+ OSFreeMem(psCacheOpWorkItem);
+ OSFreeMem(psCacheOpCleanupItem);
+
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR CacheOpCleanup(DLLIST_NODE *psListNode)
+{
+ CACHEOP_CLEANUP_WORK_ITEM *psCacheOpCleanupItem;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ psCacheOpCleanupItem = OSAllocMem(sizeof(CACHEOP_CLEANUP_WORK_ITEM));
+ if (! psCacheOpCleanupItem)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: performing sync cleanup",
+ __FUNCTION__));
+ eError = CacheOpFree(psListNode);
+ }
+ else
+ {
+ psCacheOpCleanupItem->psListNode = psListNode;
+ psCacheOpCleanupItem->sCleanupWorkItem.ui32RetryCount = 0;
+ psCacheOpCleanupItem->sCleanupWorkItem.pfnFree = CacheOpFree;
+ psCacheOpCleanupItem->sCleanupWorkItem.pvData = psCacheOpCleanupItem;
+ psCacheOpCleanupItem->sCleanupWorkItem.bDependsOnHW = IMG_FALSE;
+ PVRSRVCleanupThreadAddWork(&psCacheOpCleanupItem->sCleanupWorkItem);
+ }
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpEnqueue(PVRSRV_DATA *psPVRSRVData,
+ CACHEOP_WORK_ITEM *psData,
+ IMG_UINT32 *psSeqNum)
+{
+ OSLockAcquire(psPVRSRVData->hCacheOpThreadWorkListLock);
+
+ /* Queue this CacheOp work item into the pending list, update queue size */
+ dllist_add_to_tail(&psPVRSRVData->sCacheOpThreadWorkList, &psData->sNode);
+ gbPendingTimeline = psData->psTimeline ? IMG_TRUE : gbPendingTimeline;
+ guiPendingDevmemSize += psData->uiSize;
+
+ /* Advance the system-wide CacheOp common sequence value */
+ *psSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ if (! *psSeqNum)
+ {
+ /* Zero is _not_ a valid sequence value; skipping it
+ simplifies subsequent fence checking, as a fence value
+ of zero is supplied when no cache maintenance
+ operation is outstanding */
+ *psSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ }
+ psData->ui32OpSeqNum = *psSeqNum;
+
+ OSLockRelease(psPVRSRVData->hCacheOpThreadWorkListLock);
+
+ return PVRSRV_OK;
+}
+
+static INLINE DLLIST_NODE *CacheOpDequeue(PVRSRV_DATA *psPVRSRVData,
+ IMG_UINT64 *uiQueueDevmemSize,
+ IMG_BOOL *bHasTimeline)
+{
+ DLLIST_NODE *psListNode = NULL;
+
+ OSLockAcquire(psPVRSRVData->hCacheOpThreadWorkListLock);
+
+ if (! dllist_is_empty(&psPVRSRVData->sCacheOpThreadWorkList))
+ {
+ /* Replace entire pending list with a (re)initialized list */
+ psListNode = psPVRSRVData->sCacheOpThreadWorkList.psNextNode;
+ dllist_remove_node(&psPVRSRVData->sCacheOpThreadWorkList);
+ dllist_init(&psPVRSRVData->sCacheOpThreadWorkList);
+
+ /* These capture information about this dequeued list */
+ *uiQueueDevmemSize = (IMG_UINT64) guiPendingDevmemSize;
+ guiPendingDevmemSize = (IMG_DEVMEM_SIZE_T) 0;
+ *bHasTimeline = gbPendingTimeline;
+ gbPendingTimeline = IMG_FALSE;
+ }
+
+ OSLockRelease(psPVRSRVData->hCacheOpThreadWorkListLock);
+
+ return psListNode;
+}
+
+static PVRSRV_ERROR CacheOpExecGlobal(PVRSRV_DATA *psPVRSRVData,
+ DLLIST_NODE *psListNode)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CacheOpSeqNum;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+ DLLIST_NODE *psCurrentNode, *psNextNode;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 uiTimeNow;
+#endif
+
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ uiTimeNow = OSClockns64();
+#endif
+
+ /* The head node is a _valid_ CacheOp work item so process it first */
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psListNode, CACHEOP_WORK_ITEM, sNode);
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+ psCacheOpWorkItem->bRBF = IMG_FALSE;
+ psCacheOpWorkItem->bUMF = IMG_FALSE;
+#endif
+
+ /* Process the other queued CacheOp work items, if present */
+ dllist_foreach_node (psListNode, psCurrentNode, psNextNode)
+ {
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psCurrentNode, CACHEOP_WORK_ITEM, sNode);
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+ psCacheOpWorkItem->bRBF = IMG_FALSE;
+ psCacheOpWorkItem->bUMF = IMG_FALSE;
+#endif
+ }
+
+ /* Last CacheOp item updates ghCompletedCacheOpSeqNum */
+ ui32CacheOpSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, ui32CacheOpSeqNum);
+
+ /* Signal any waiting threads blocked on CacheOp fence checks; the
+ completed sequence number has been updated to the last queued work item */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpExecRangeBased(PVRSRV_DATA *psPVRSRVData,
+ DLLIST_NODE *psListNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+ DLLIST_NODE *psNodeIter = psListNode;
+ IMG_BOOL bSkipRemainingCacheOps = IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM *psPrevWorkItem = NULL;
+#endif
+
+ do
+ {
+ /* Look up the corresponding work item and perform the cache maintenance operation if
+ it is a non-timeline work item (i.e. the PMR is not NULL), else notify the timeline */
+ psCacheOpWorkItem = IMG_CONTAINER_OF(psNodeIter, CACHEOP_WORK_ITEM, sNode);
+ if (psCacheOpWorkItem->psPMR != NULL)
+ {
+ if (bSkipRemainingCacheOps == IMG_FALSE)
+ {
+ eError = CacheOpRangeBased(psCacheOpWorkItem->psPMR,
+ psCacheOpWorkItem->uiOffset,
+ psCacheOpWorkItem->uiSize,
+ psCacheOpWorkItem->uiCacheOp,
+ &bSkipRemainingCacheOps);
+ if (eError != PVRSRV_OK)
+ {
+ /* This _should_ not fail, but if it does there is
+ not much we can do about it; for now we log it but
+ still increment the completed CacheOp seq number */
+ PVR_DPF((CACHEOP_DPFL,
+ "CacheOp failed: PMR:%p Offset:%llx Size:%llx CacheOp:%d",
+ psCacheOpWorkItem->psPMR,
+ psCacheOpWorkItem->uiOffset,
+ psCacheOpWorkItem->uiSize,
+ psCacheOpWorkItem->uiCacheOp));
+ PVR_ASSERT(0);
+ }
+ }
+
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64ExecuteTime = bSkipRemainingCacheOps ?
+ (psPrevWorkItem ? psPrevWorkItem->ui64ExecuteTime : OSClockns64()) : OSClockns64();
+ psCacheOpWorkItem->bRBF = !bSkipRemainingCacheOps;
+ psCacheOpWorkItem->bUMF = IMG_FALSE;
+ psPrevWorkItem = psCacheOpWorkItem;
+#endif
+
+ /* Currently executed CacheOp item updates ghCompletedCacheOpSeqNum */
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+
+ if (psCacheOpWorkItem->bSignalEventObject == IMG_TRUE)
+ {
+ /* Multiple CacheOp work items from two or more threads might be
+ present in the processed queue, so we signal as each such CacheOp
+ is processed to unblock the waiting threads */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ }
+ else
+ {
+ PVR_ASSERT(psCacheOpWorkItem->psTimeline != NULL);
+
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+
+#if defined(CONFIG_SW_SYNC)
+ sw_sync_timeline_inc(psCacheOpWorkItem->psTimeline->private_data, 1);
+ fput(psCacheOpWorkItem->psTimeline);
+#endif
+
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64ExecuteTime = OSClockns64();
+#endif
+ }
+
+ /* This terminates on a NULL node or a single-item queue */
+ psNodeIter = dllist_get_next_node(psNodeIter);
+ } while (psNodeIter && psNodeIter != psListNode);
+
+ return eError;
+}
+
+static void CacheOpExecQueuedList(PVRSRV_DATA *psPVRSRVData)
+{
+ PVRSRV_ERROR eError;
+ DLLIST_NODE *psListNode;
+ IMG_BOOL bUseGlobalCacheOp;
+ IMG_BOOL bHasTimeline = IMG_FALSE;
+ IMG_UINT64 ui64Size = (IMG_UINT64) 0;
+ IMG_UINT64 ui64FlushThreshold = PVR_DIRTY_BYTES_FLUSH_THRESHOLD;
+
+ /* Obtain the current queue of pending CacheOps; this also provides
+ information about the queue, such as whether any CacheOp in it
+ is a timeline request, and the total CacheOp size */
+ psListNode = CacheOpDequeue(psPVRSRVData, &ui64Size, &bHasTimeline);
+ if (psListNode == NULL)
+ {
+ /* This should _not_ happen, but if it does, wake up waiting threads */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ return;
+ }
+
+ /* Perform a global cache operation if the queue size (in bytes)
+ meets the flush threshold and no work item in the queue is a
+ timeline request */
+ bUseGlobalCacheOp = ui64Size >= ui64FlushThreshold;
+ if (bUseGlobalCacheOp == IMG_TRUE && bHasTimeline == IMG_FALSE)
+ {
+ eError = CacheOpExecGlobal(psPVRSRVData, psListNode);
+ if (eError == PVRSRV_OK)
+ {
+ goto e0;
+ }
+ }
+
+ /* Else use range-based cache maintenance per queue item */
+ eError = CacheOpExecRangeBased(psPVRSRVData, psListNode);
+
+e0:
+#if defined(CACHEOP_DEBUG)
+ eError = CacheOpStatExecLog(psListNode);
+#endif
+
+ /* Once done, defer CacheOp cleanup */
+ eError = CacheOpCleanup(psListNode);
+}
+
+static void CacheOpThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF((CACHEOP_DPFL, "%s: thread starting...", __FUNCTION__));
+
+ /* Store the process id (pid) of the CacheOp thread */
+ psPVRSRVData->CacheOpThreadPid = OSGetCurrentProcessID();
+
+ /* Open CacheOp thread event object, abort driver if event object open fails */
+ eError = OSEventObjectOpen(psPVRSRVData->hCacheOpThreadEventObject, &hOSEvent);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* While driver is in good state and not being unloaded, perform pending cache maintenance */
+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && (!psPVRSRVData->bUnload))
+ {
+ /* Wait here until signalled that there are queued (pending) CacheOp work items */
+ eError = OSEventObjectWaitTimeout(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT);
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: wait timeout", __FUNCTION__));
+ }
+ else if (eError == PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: wait OK, signal received", __FUNCTION__));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: wait error %d", __FUNCTION__, eError));
+ }
+
+ CacheOpExecQueuedList(psPVRSRVData);
+ }
+
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+
+ PVR_DPF((CACHEOP_DPFL, "%s: thread terminating...", __FUNCTION__));
+}
+
+static PVRSRV_ERROR CacheOpExecQueue (PMR **ppsPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_UINT32 *pui32OpSeqNum)
+{
+ IMG_UINT32 ui32Idx;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (psPVRSRVData->bUnload)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: driver unloading, performing CacheOp synchronously",
+ __FUNCTION__));
+
+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ eError = CacheOpExec(ppsPMR[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx]);
+ }
+
+ /* No CacheOp fence dependencies */
+ *pui32OpSeqNum = 0;
+ }
+ else
+ {
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+
+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ /* As PVRSRV_CACHE_OP_INVALIDATE is used to transfer
+ device memory buffer ownership back to the processor,
+ we cannot defer it and must action it immediately */
+ if (puiCacheOp[ui32Idx] == PVRSRV_CACHE_OP_INVALIDATE)
+ {
+ eError = CacheOpExec (ppsPMR[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: PVRSRV_CACHE_OP_INVALIDATE failed (%u)",
+ __FUNCTION__, eError));
+ }
+
+ /* Clear CacheOp fence dependencies if single entry; in a
+ multiple entry batch, preserve fence dependency update */
+ *pui32OpSeqNum = (ui32Idx == 0) ? 0 : *pui32OpSeqNum;
+ continue;
+ }
+
+ /* For now use dynamic alloc, static CCB _might_ be faster */
+ psCacheOpWorkItem = OSAllocMem(sizeof(CACHEOP_WORK_ITEM));
+ if (psCacheOpWorkItem == NULL)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: OSAllocMem failed (%u)",
+ __FUNCTION__, eError));
+
+ /* Signal the CacheOp thread to ensure whatever was enqueued thus
+ far (if any) gets processed even though we fail the request */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* For safety, take reference here in user context; to speed
+ up deferred cache management we drop reference as late as
+ possible (during cleanup) */
+ eError = PMRLockSysPhysAddresses(ppsPMR[ui32Idx]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: PMRLockSysPhysAddresses failed (%u)",
+ __FUNCTION__, eError));
+
+ OSFreeMem(psCacheOpWorkItem);
+ psCacheOpWorkItem = NULL;
+
+ /* Signal the CacheOp thread to ensure whatever was enqueued thus
+ far (if any) gets processed even though we fail the request */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+ return eError;
+ }
+
+ /* Prepare & enqueue CacheOp work item */
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
+ psCacheOpWorkItem->ui64QueuedTime = OSClockns64();
+#endif
+ psCacheOpWorkItem->bSignalEventObject = IMG_FALSE;
+ psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx];
+ psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx];
+ psCacheOpWorkItem->uiSize = puiSize[ui32Idx];
+ psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx];
+ psCacheOpWorkItem->psTimeline = NULL;
+
+ if (ui32Idx == (ui32NumCacheOps - 1))
+ {
+ /* The idea here is to track the last CacheOp in a
+ batch queue so that we only wake up stalled threads
+ waiting on fence checks once that CacheOp has been
+ processed; this serves to reduce spurious thread
+ wake-ups */
+ psCacheOpWorkItem->bSignalEventObject = IMG_TRUE;
+ }
+
+ eError = CacheOpEnqueue(psPVRSRVData, psCacheOpWorkItem, pui32OpSeqNum);
+ PVR_LOG_IF_ERROR(eError, "CacheOpEnqueue");
+ }
+
+ if (psCacheOpWorkItem != NULL)
+ {
+ /* Signal the CacheOp thread to ensure this item gets processed */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ }
+
+ return eError;
+}
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED) */
+static PVRSRV_ERROR CacheOpExecQueue(PMR **ppsPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_UINT32 *pui32OpSeqNum)
+{
+ IMG_UINT32 ui32Idx;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ eError = CacheOpExec(ppsPMR[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx]);
+ }
+
+ /* For immediate RBF, common/completed are identical */
+ *pui32OpSeqNum = OSAtomicRead(&ghCommonCacheOpSeqNum);
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, *pui32OpSeqNum);
+
+ return eError;
+}
+#endif
+
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32NumCacheOps,
+ PMR **ppsPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 *pui32OpSeqNum)
+{
+ return CacheOpExecQueue(ppsPMR,
+ puiOffset,
+ puiSize,
+ puiCacheOp,
+ ui32NumCacheOps,
+ pui32OpSeqNum);
+}
+
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType,
+ IMG_UINT32 ui32FenceOpSeqNum)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32CompletedOpSeqNum;
+ IMG_BOOL b1stCacheOpFenceCheckPass;
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem;
+ IMG_UINT64 uiTimeNow = OSClockns64();
+ IMG_UINT32 ui32RetryCount = 0;
+
+ dllist_init(&sCacheOpWorkItem.sNode);
+
+ /* No PMR/timeline for fence CacheOp */
+ sCacheOpWorkItem.psPMR = NULL;
+ sCacheOpWorkItem.psTimeline = NULL;
+ sCacheOpWorkItem.ui64QueuedTime = uiTimeNow;
+ sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#if defined(PVR_RI_DEBUG)
+ sCacheOpWorkItem.eFenceOpType = eFenceOpType;
+#endif
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(eFenceOpType);
+
+ ui32CompletedOpSeqNum = OSAtomicRead(&ghCompletedCacheOpSeqNum);
+ b1stCacheOpFenceCheckPass = CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum);
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+ /* If initial fence check fails, then wait-and-retry in loop */
+ if (b1stCacheOpFenceCheckPass == IMG_FALSE)
+ {
+ IMG_HANDLE hOSEvent;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Open CacheOp update event object, if event object open fails return error */
+ eError = OSEventObjectOpen(psPVRSRVData->hCacheOpUpdateEventObject, &hOSEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: failed to open update event object",
+ __FUNCTION__));
+ goto e0;
+ }
+
+ /* (Re)read completed cache op sequence number before wait */
+ ui32CompletedOpSeqNum = OSAtomicRead(&ghCompletedCacheOpSeqNum);
+
+ /* Check if the CacheOp dependencies for this thread are met */
+ eError = CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum) ?
+ PVRSRV_OK : PVRSRV_ERROR_FAILED_DEPENDENCIES;
+
+ while (eError != PVRSRV_OK)
+ {
+ /* Wait here until signalled that update has occurred by CacheOp thread */
+ eError = OSEventObjectWaitTimeout(hOSEvent, CACHEOP_FENCE_WAIT_TIMEOUT);
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: wait timeout", __FUNCTION__));
+#if defined(CACHEOP_DEBUG)
+ /* This is a more accurate notion of fence check retries */
+ ui32RetryCount += 1;
+#endif
+ }
+ else if (eError == PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL, "%s: wait OK, signal received", __FUNCTION__));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: wait error %d", __FUNCTION__, eError));
+ }
+
+ /* (Re)read latest completed CacheOp sequence number to fence */
+ ui32CompletedOpSeqNum = OSAtomicRead(&ghCompletedCacheOpSeqNum);
+
+ /* Check if the CacheOp dependencies for this thread are met */
+ eError = CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum) ?
+ PVRSRV_OK : PVRSRV_ERROR_FAILED_DEPENDENCIES;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ uiTimeNow = OSClockns64();
+#endif
+
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+ }
+
+e0:
+#if defined(CACHEOP_DEBUG)
+ if (b1stCacheOpFenceCheckPass == IMG_FALSE)
+ {
+ /* This log gives an indication of how badly deferred
+ cache maintenance is doing and provides data for
+ possible dynamic spawning of multiple CacheOpThreads;
+ this is not currently implemented in the framework,
+ but such an extension would require a monitoring
+ thread to scan the gasCacheOpStatStalled table and
+ spawn/kill a CacheOpThread when certain conditions are met */
+ CacheOpStatStallLogWrite(ui32FenceOpSeqNum,
+ sCacheOpWorkItem.ui64QueuedTime,
+ uiTimeNow,
+ ui32RetryCount);
+ }
+#endif
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED) */
+#if defined(CACHEOP_DEBUG)
+ PVR_UNREFERENCED_PARAMETER(ui32RetryCount);
+#endif
+ /* Fence checks _cannot_ fail in immediate RBF */
+ PVR_UNREFERENCED_PARAMETER(b1stCacheOpFenceCheckPass);
+ PVR_ASSERT(b1stCacheOpFenceCheckPass == IMG_TRUE);
+#endif
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+ sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_NONE;
+ eError = CacheOpStatExecLog(&sCacheOpWorkItem.sNode);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpSetTimeline (IMG_INT32 i32Timeline)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+ IMG_UINT32 ui32OpSeqNum;
+
+ if (i32Timeline < 0)
+ {
+ return PVRSRV_OK;
+ }
+
+ psCacheOpWorkItem = OSAllocMem(sizeof(CACHEOP_WORK_ITEM));
+ if (psCacheOpWorkItem == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Prepare & enqueue a timeline CacheOp work item */
+ psCacheOpWorkItem->psPMR = NULL;
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64QueuedTime = OSClockns64();
+ psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+#if defined(CONFIG_SW_SYNC)
+ psCacheOpWorkItem->psTimeline = fget(i32Timeline);
+ if (!psCacheOpWorkItem->psTimeline ||
+ !psCacheOpWorkItem->psTimeline->private_data)
+ {
+ OSFreeMem(psCacheOpWorkItem);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Enqueue timeline work-item, notifies timeline FD when executed */
+ eError = CacheOpEnqueue(psPVRSRVData, psCacheOpWorkItem, &ui32OpSeqNum);
+ PVR_LOG_IF_ERROR(eError, "CacheOpEnqueue");
+
+ /* Signal the CacheOp thread to ensure this item gets processed */
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+#else
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+ PVR_UNREFERENCED_PARAMETER(ui32OpSeqNum);
+ /* CONFIG_SW_SYNC is unavailable; free the unused work item to avoid leaking it */
+ OSFreeMem(psCacheOpWorkItem);
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ PVR_ASSERT(0);
+#endif
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED) */
+ struct file *psFile;
+
+ if (i32Timeline < 0)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(CONFIG_SW_SYNC)
+ psFile = fget(i32Timeline);
+ if (!psFile || !psFile->private_data)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ sw_sync_timeline_inc(psFile->private_data, 1);
+ fput(psFile);
+
+ eError = PVRSRV_OK;
+#else
+ PVR_UNREFERENCED_PARAMETER(psFile);
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ PVR_ASSERT(0);
+#endif
+#endif
+
+ return eError;
+}
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH) */
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32NumCacheOps,
+ PMR **ppsPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 *pui32OpSeqNum)
+{
+ IMG_UINT32 ui32Idx;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bHasInvalidate = IMG_FALSE;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_CACHE_OP uiCacheOp = PVRSRV_CACHE_OP_NONE;
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem;
+ dllist_init(&sCacheOpWorkItem.sNode);
+
+ sCacheOpWorkItem.psPMR = ppsPMR[0];
+ sCacheOpWorkItem.bRBF = IMG_FALSE;
+ sCacheOpWorkItem.bUMF = IMG_FALSE;
+ sCacheOpWorkItem.psTimeline = NULL;
+ sCacheOpWorkItem.uiOffset = puiOffset[0];
+ sCacheOpWorkItem.ui64QueuedTime = (IMG_UINT64)0;
+ sCacheOpWorkItem.ui64ExecuteTime = (IMG_UINT64)0;
+ sCacheOpWorkItem.uiSize = (IMG_DEVMEM_OFFSET_T)0;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+ /* Coalesce all requests into a single superset request */
+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ uiCacheOp = SetCacheOp(uiCacheOp, puiCacheOp[ui32Idx]);
+ if (puiCacheOp[ui32Idx] == PVRSRV_CACHE_OP_INVALIDATE)
+ {
+ /* Cannot be deferred, action now */
+ bHasInvalidate = IMG_TRUE;
+#if !defined(CACHEOP_DEBUG)
+ break;
+#endif
+ }
+#if defined(CACHEOP_DEBUG)
+ /* For debug, we _want_ to know how many items are in the batch */
+ sCacheOpWorkItem.uiSize += puiSize[ui32Idx];
+ *pui32OpSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ *pui32OpSeqNum = !*pui32OpSeqNum ?
+ OSAtomicIncrement(&ghCommonCacheOpSeqNum) : *pui32OpSeqNum;
+#endif
+ }
+
+#if !defined(CACHEOP_DEBUG)
+ /* For release, we don't care, so use per-batch sequencing */
+ *pui32OpSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ *pui32OpSeqNum = !*pui32OpSeqNum ?
+ OSAtomicIncrement(&ghCommonCacheOpSeqNum) : *pui32OpSeqNum;
+#endif
+
+ if (bHasInvalidate == IMG_TRUE)
+ {
+ psPVRSRVData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64QueuedTime = OSClockns64();
+#endif
+
+ /* Perform global cache maintenance operation */
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSCPUOperation failed (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+#endif
+
+ /* Having completed the invalidate, note sequence number */
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, *pui32OpSeqNum);
+ }
+ else
+ {
+ /* NOTE: Possible race condition, CacheOp value set here using SetCacheOp()
+ might be over-written during read-modify-write sequence in CacheOpFence() */
+ psPVRSRVData->uiCacheOp = SetCacheOp(psPVRSRVData->uiCacheOp, uiCacheOp);
+ }
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.ui32OpSeqNum = *pui32OpSeqNum;
+ eError = CacheOpStatExecLog(&sCacheOpWorkItem.sNode);
+#endif
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType,
+ IMG_UINT32 ui32FenceOpSeqNum)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_BOOL b1stCacheOpFenceCheckPass;
+ IMG_UINT32 ui32CacheOpSeqNum;
+ PVRSRV_CACHE_OP uiCacheOp;
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem;
+ sCacheOpWorkItem.ui64QueuedTime = (IMG_UINT64)0;
+ sCacheOpWorkItem.ui64ExecuteTime = (IMG_UINT64)0;
+ sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_NONE;
+#endif
+
+ ui32CacheOpSeqNum = OSAtomicRead(&ghCompletedCacheOpSeqNum);
+ b1stCacheOpFenceCheckPass = CacheOpFenceCheck(ui32CacheOpSeqNum, ui32FenceOpSeqNum);
+
+ /* Flush if there is a pending CacheOp that affects this fence */
+ if (b1stCacheOpFenceCheckPass == IMG_FALSE)
+ {
+ /* After global CacheOp, requests before this sequence are met */
+ ui32CacheOpSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ ui32CacheOpSeqNum = !ui32CacheOpSeqNum ?
+ OSAtomicIncrement(&ghCommonCacheOpSeqNum) : ui32CacheOpSeqNum;
+
+ uiCacheOp = psPVRSRVData->uiCacheOp;
+ psPVRSRVData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64QueuedTime = OSClockns64();
+#endif
+
+ /* Perform global cache maintenance operation */
+ eError = OSCPUOperation(uiCacheOp);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSCPUOperation failed (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+#endif
+
+ /* Having completed global CacheOp, note sequence number */
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, ui32CacheOpSeqNum);
+ }
+
+#if defined(CACHEOP_DEBUG)
+ dllist_init(&sCacheOpWorkItem.sNode);
+
+ sCacheOpWorkItem.psPMR = NULL;
+ sCacheOpWorkItem.psTimeline = NULL;
+ sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#if defined(PVR_RI_DEBUG)
+ sCacheOpWorkItem.eFenceOpType = eFenceOpType;
+#endif
+
+ eError = CacheOpStatExecLog(&sCacheOpWorkItem.sNode);
+#endif
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpSetTimeline (IMG_INT32 i32Timeline)
+{
+ PVRSRV_ERROR eError;
+ struct file *psFile;
+ PVRSRV_CACHE_OP uiCacheOp;
+ PVRSRV_DATA *psPVRSRVData;
+
+ if (i32Timeline < 0)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(CONFIG_SW_SYNC)
+ psFile = fget(i32Timeline);
+ if (!psFile || !psFile->private_data)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+ uiCacheOp = psPVRSRVData->uiCacheOp;
+ psPVRSRVData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+ /* Perform global cache maintenance operation */
+ eError = OSCPUOperation(uiCacheOp);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSCPUOperation failed (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ sw_sync_timeline_inc(psFile->private_data, 1);
+ fput(psFile);
+e0:
+#else
+ PVR_UNREFERENCED_PARAMETER(psFile);
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ PVR_ASSERT(0);
+#endif
+
+ return eError;
+}
+#endif /* defined(SUPPORT_RANGEBASED_CACHEFLUSH) */
+
+PVRSRV_ERROR CacheOpExec (PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+ /* This interface is always synchronous and never deferred;
+ in debug builds a work item is used to capture debug logs */
+ CACHEOP_WORK_ITEM sCacheOpWorkItem;
+ dllist_init(&sCacheOpWorkItem.sNode);
+
+ sCacheOpWorkItem.psPMR = psPMR;
+ sCacheOpWorkItem.uiSize = uiSize;
+ sCacheOpWorkItem.psTimeline = NULL;
+ sCacheOpWorkItem.uiOffset = uiOffset;
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.ui64QueuedTime = OSClockns64();
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ sCacheOpWorkItem.ui32OpSeqNum = !sCacheOpWorkItem.ui32OpSeqNum ?
+ OSAtomicIncrement(&ghCommonCacheOpSeqNum) : sCacheOpWorkItem.ui32OpSeqNum;
+#endif
+
+ /* Perform range-based cache maintenance operation */
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((CACHEOP_DPFL,
+ "%s: PMRLockSysPhysAddresses failed (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ eError = CacheOpRangeBased(psPMR, uiOffset, uiSize, uiCacheOp, &bUsedGlobalFlush);
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.bUMF = IMG_FALSE;
+ sCacheOpWorkItem.bRBF = !bUsedGlobalFlush;
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ eError = CacheOpStatExecLog(&sCacheOpWorkItem.sNode);
+#endif
+
+ PMRUnlockSysPhysAddresses(psPMR);
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64QueuedTimeUs,
+ IMG_UINT64 ui64ExecuteTimeUs,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem;
+ dllist_init(&sCacheOpWorkItem.sNode);
+
+ sCacheOpWorkItem.psPMR = psPMR;
+ sCacheOpWorkItem.uiSize = uiSize;
+ sCacheOpWorkItem.psTimeline = NULL;
+ sCacheOpWorkItem.uiOffset = uiOffset;
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = OSAtomicIncrement(&ghCommonCacheOpSeqNum);
+ sCacheOpWorkItem.ui32OpSeqNum = !sCacheOpWorkItem.ui32OpSeqNum ?
+ OSAtomicIncrement(&ghCommonCacheOpSeqNum) : sCacheOpWorkItem.ui32OpSeqNum;
+
+ /* All UM cache maintenance is range-based */
+ sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs;
+ sCacheOpWorkItem.ui64QueuedTime = ui64QueuedTimeUs;
+ sCacheOpWorkItem.bUMF = IMG_TRUE;
+ sCacheOpWorkItem.bRBF = IMG_TRUE;
+
+ CacheOpStatExecLogWrite(&sCacheOpWorkItem.sNode);
+#else /* defined(CACHEOP_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui64QueuedTimeUs);
+ PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs);
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpGetLineSize (IMG_UINT32 *pui32L1DataCacheLineSize)
+{
+ *pui32L1DataCacheLineSize = guiCacheLineSize;
+ PVR_ASSERT(guiCacheLineSize != 0);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpInit (void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData;
+
+ /* DDK initialisation is anticipated to be performed on the boot
+ processor (the little core in big/little systems), though this may
+ not always be the case. On platforms with mismatched d-cache line
+ sizes, the value cached here should therefore be the system-wide
+ safe (i.e. smallest) L1 d-cache line size */
+ guiCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+ PVR_ASSERT(guiCacheLineSize != 0);
+
+ guiOSPageSize = OSGetPageSize();
+ PVR_ASSERT(guiOSPageSize != 0);
+
+ OSAtomicWrite(&ghCommonCacheOpSeqNum, 0);
+ OSAtomicWrite(&ghCompletedCacheOpSeqNum, 0);
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Create an event object for pending CacheOp work items */
+ eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Create an event object for updating pending fence checks on CacheOp */
+ eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Create a lock to police list of pending CacheOp work items */
+ eError = OSLockCreate((POS_LOCK*)&psPVRSRVData->hCacheOpThreadWorkListLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Initialise pending CacheOp list & seq number */
+ dllist_init(&psPVRSRVData->sCacheOpThreadWorkList);
+ guiPendingDevmemSize = (IMG_DEVMEM_SIZE_T) 0;
+ gbPendingTimeline = IMG_FALSE;
+
+#if defined(CACHEOP_DEBUG)
+ gi32CacheOpStatExecWriteIdx = 0;
+ gi32CacheOpStatStallWriteIdx = 0;
+
+ OSCachedMemSet(gasCacheOpStatExecuted, 0, sizeof(gasCacheOpStatExecuted));
+ OSCachedMemSet(gasCacheOpStatStalled, 0, sizeof(gasCacheOpStatStalled));
+
+ eError = OSLockCreate((POS_LOCK*)&ghCacheOpStatExecLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = OSLockCreate((POS_LOCK*)&ghCacheOpStatStallLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ pvCacheOpStatExecEntry = OSCreateStatisticEntry("cache_ops_exec",
+ NULL,
+ CacheOpStatExecLogRead,
+ NULL,
+ NULL,
+ NULL);
+ PVR_ASSERT(pvCacheOpStatExecEntry != NULL);
+
+ pvCacheOpStatStallEntry = OSCreateStatisticEntry("cache_ops_stall",
+ NULL,
+ CacheOpStatStallLogRead,
+ NULL,
+ NULL,
+ NULL);
+ PVR_ASSERT(pvCacheOpStatStallEntry != NULL);
+#endif
+
+ /* Create a thread which is used to do the deferred CacheOp */
+ eError = OSThreadCreatePriority(&psPVRSRVData->hCacheOpThread,
+ "pvr_cache_ops",
+ CacheOpThread,
+ psPVRSRVData,
+ OS_THREAD_HIGHEST_PRIORITY);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"CacheOpInit: failed to create CacheOp thread"));
+ return CacheOpDeInit();
+ }
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED) */
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+#if defined(CACHEOP_DEBUG)
+ gi32CacheOpStatExecWriteIdx = 0;
+
+ OSCachedMemSet(gasCacheOpStatExecuted, 0, sizeof(gasCacheOpStatExecuted));
+
+ eError = OSLockCreate((POS_LOCK*)&ghCacheOpStatExecLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ pvCacheOpStatExecEntry = OSCreateStatisticEntry("cache_ops_exec",
+ NULL,
+ CacheOpStatExecLogRead,
+ NULL,
+ NULL,
+ NULL);
+ PVR_ASSERT(pvCacheOpStatExecEntry != NULL);
+#endif
+ eError = PVRSRV_OK;
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpDeInit (void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData;
+
+#if defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED)
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+
+#if defined(CACHEOP_DEBUG)
+ OSLockDestroy(ghCacheOpStatExecLock);
+ OSRemoveStatisticEntry(pvCacheOpStatExecEntry);
+ OSRemoveStatisticEntry(pvCacheOpStatStallEntry);
+#endif
+
+ if (psPVRSRVData->hCacheOpThread)
+ {
+ if (psPVRSRVData->hCacheOpThreadEventObject)
+ {
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+
+ if (psPVRSRVData->hCacheOpUpdateEventObject)
+ {
+ eError = OSEventObjectSignal(psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(psPVRSRVData->hCacheOpThread);
+ if (PVRSRV_OK == eError)
+ {
+ psPVRSRVData->hCacheOpThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ }
+
+ OSLockDestroy(psPVRSRVData->hCacheOpThreadWorkListLock);
+ psPVRSRVData->hCacheOpThreadWorkListLock = NULL;
+
+ if (psPVRSRVData->hCacheOpUpdateEventObject)
+ {
+ eError = OSEventObjectDestroy(psPVRSRVData->hCacheOpUpdateEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ psPVRSRVData->hCacheOpUpdateEventObject = NULL;
+ }
+
+ if (psPVRSRVData->hCacheOpThreadEventObject)
+ {
+ eError = OSEventObjectDestroy(psPVRSRVData->hCacheOpThreadEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ psPVRSRVData->hCacheOpThreadEventObject = NULL;
+ }
+
+ eError = PVRSRV_OK;
+#else /* defined(SUPPORT_RANGEBASED_CACHEFLUSH_DEFERRED) */
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+#if defined(CACHEOP_DEBUG)
+ OSLockDestroy(ghCacheOpStatExecLock);
+ OSRemoveStatisticEntry(pvCacheOpStatExecEntry);
+#endif
+ eError = PVRSRV_OK;
+#endif
+
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File cache.h
+@Title CPU cache management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_KM_H_
+#define _CACHE_KM_H_
+
+#if defined(LINUX)
+#include <linux/version.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE; /*!< Type represents address required for cache op. */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL 0x1 /*!< Operation requires virtual address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires physical address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH 0x3 /*!< Operation requires both virtual & physical addresses */
+
+#define CACHEFLUSH_KM_RANGEBASED_DEFERRED 0x1 /*!< Services KM using deferred (i.e. asynchronous) range-based flush */
+#define CACHEFLUSH_KM_RANGEBASED 0x2 /*!< Services KM using immediate (i.e. synchronous) range-based flush */
+#define CACHEFLUSH_KM_GLOBAL 0x3 /*!< Services KM using global flush */
+#ifndef CACHEFLUSH_KM_TYPE /*!< Type represents cache maintenance operation method */
+ #if defined(__x86__)
+ /* Default for x86/x86_64 is global */
+ #define CACHEFLUSH_KM_TYPE CACHEFLUSH_KM_GLOBAL
+ #elif defined(__aarch64__)
+ #if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+ /* Default here is range-based (i.e. no Linux global flush) */
+ #define CACHEFLUSH_KM_TYPE CACHEFLUSH_KM_RANGEBASED
+ #else
+ /* Default here is global (i.e. OS supports global flush) */
+ #define CACHEFLUSH_KM_TYPE CACHEFLUSH_KM_GLOBAL
+ #endif
+ #else
+ /* Default for other architectures is range-based */
+ #define CACHEFLUSH_KM_TYPE CACHEFLUSH_KM_RANGEBASED
+ #endif
+#else
+ #if (CACHEFLUSH_KM_TYPE == CACHEFLUSH_KM_GLOBAL)
+ #if defined(__mips__)
+ /* Architecture does not support global cache maintenance */
+ #error "CACHEFLUSH_KM_GLOBAL is not supported on architecture"
+ #elif defined(__aarch64__)
+ #if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+ /* These Linux revisions do not support global cache maintenance */
+ #error "CACHEFLUSH_KM_GLOBAL is not supported on Linux v4.2 onwards"
+ #endif
+ #endif
+ #endif
+#endif
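+
+/* Example: a platform build may select the maintenance method explicitly by
+ defining CACHEFLUSH_KM_TYPE on the compiler command line; a hypothetical
+ Makefile fragment (illustrative only) might be:
+
+ ccflags-y += -DCACHEFLUSH_KM_TYPE=CACHEFLUSH_KM_RANGEBASED_DEFERRED
+
+ subject to the architecture/kernel-version constraints enforced by the
+ #error checks above */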
+
+/*
+ If we receive multiple cache operations before the operation that will
+ actually trigger the maintenance, we need to coalesce them so that the
+ eventual operation covers all of them. Used for global cache maintenance.
+*/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SetCacheOp)
+#endif
+static INLINE PVRSRV_CACHE_OP SetCacheOp(PVRSRV_CACHE_OP uiCurrent, PVRSRV_CACHE_OP uiNew)
+{
+ PVRSRV_CACHE_OP uiRet;
+ uiRet = uiCurrent | uiNew;
+ return uiRet;
+}
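+
+/* The OR-accumulation above relies on the PVRSRV_CACHE_OP_* encoding in
+ cache_ops.h; assuming the usual encoding where CLEAN is 0x1, INVALIDATE is
+ 0x2 and FLUSH is 0x3, coalescing a clean with an invalidate yields a flush:
+
+ SetCacheOp(PVRSRV_CACHE_OP_CLEAN, PVRSRV_CACHE_OP_INVALIDATE)
+ == PVRSRV_CACHE_OP_FLUSH
+*/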
+
+/*
+ Cache maintenance framework API
+*/
+PVRSRV_ERROR CacheOpInit(void);
+PVRSRV_ERROR CacheOpDeInit(void);
+
+/* This interface is always guaranteed to be synchronous */
+PVRSRV_ERROR CacheOpExec (PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp);
+
+/* This interface _may_ defer cache-ops (i.e. asynchronous) */
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32OpCount,
+ PMR **ppsPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 *pui32OpSeqNum);
+
+/* This interface is used to log user-mode cache-ops */
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64QueuedTimeMs,
+ IMG_UINT64 ui64ExecuteTimeMs,
+ PVRSRV_CACHE_OP uiCacheOp);
+
+/* This interface must be used to fence for pending cache-ops before kicks */
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum);
+
+/* This interface is used for notification of completed cache-ops */
+PVRSRV_ERROR CacheOpSetTimeline (IMG_INT32 i32OpTimeline);
+
+/* This interface is used for retrieving the processor d-cache line size */
+PVRSRV_ERROR CacheOpGetLineSize (IMG_UINT32 *pui32L1DataCacheLineSize);
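+
+/*
+ Minimal usage sketch (illustrative only; error handling omitted, and psPMR,
+ uiOffset, uiSize, uiCacheOp and eDM are assumed to be supplied by the caller):
+
+     IMG_UINT32 ui32SeqNum;
+
+     CacheOpQueue(1, &psPMR, &uiOffset, &uiSize, &uiCacheOp, &ui32SeqNum);
+     ...
+     CacheOpFence(eDM, ui32SeqNum);    (fence for the possibly deferred op before a kick)
+*/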
+#endif /* _CACHE_KM_H_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services cache management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines for cache management which are visible internally
+ and externally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_OPS_H_
+#define _CACHE_OPS_H_
+#include "img_types.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */
+#define PVRSRV_CACHE_OP_NONE 0x0 /*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN 0x1 /*!< Flush w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE 0x2 /*!< Invalidate w/o flush */
+#define PVRSRV_CACHE_OP_FLUSH 0x3 /*!< Flush w/ invalidate */
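+
+/* Note: with this encoding, operations compose by bitwise OR, e.g.
+ * PVRSRV_CACHE_OP_CLEAN | PVRSRV_CACHE_OP_INVALIDATE == PVRSRV_CACHE_OP_FLUSH.
+ */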
+
+#define CACHEFLUSH_UM_X86 0x1 /*!< Intel x86/x64 specific UM range-based cache flush */
+#define CACHEFLUSH_UM_ARM64 0x2 /*!< ARM Aarch64 specific UM range-based cache flush */
+#define CACHEFLUSH_UM_GENERIC 0x3 /*!< Generic UM/KM cache flush (i.e. CACHEFLUSH_KM_TYPE) */
+#define CACHEFLUSH_UM_X86_ONLY 0x4 /*!< Force x86/x64 UM flush exclusively */
+#define CACHEFLUSH_UM_ARM64_ONLY 0x5 /*!< Force ARM Aarch64 UM flush exclusively */
+#ifndef CACHEFLUSH_UM_TYPE
+ #if defined(__i386__) || defined(__x86_64__)
+ #define CACHEFLUSH_UM_TYPE CACHEFLUSH_UM_X86
+ #elif defined(__aarch64__)
+ #define CACHEFLUSH_UM_TYPE CACHEFLUSH_UM_ARM64
+ #else
+ #define CACHEFLUSH_UM_TYPE CACHEFLUSH_UM_GENERIC
+ #endif
+#endif
+
+#endif /* _CACHE_OPS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Handles connections coming from the client and the management
+ connection based information
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "lists.h"
+#include "osfunc.h"
+#include "tlstream.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid = 0;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+ PVRSRV_ERROR eError;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ IMG_UINT64 ui64MaxBridgeTime;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if(psPVRSRVData->bUnload)
+ {
+ /* driver is unloading so do not allow the bridge lock to be released */
+ ui64MaxBridgeTime = 0;
+ }
+ else
+ {
+ ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+ }
+
+ if (psConnection == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "ConnectionDataDestroy: Missing connection!"));
+ PVR_ASSERT(0);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Close the process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (psConnection->hProcessStats != NULL)
+ {
+ PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+ psConnection->hProcessStats = NULL;
+ }
+#endif
+
+ /* Close HWPerfClient stream here even though we created it in
+ * PVRSRVConnectKM(). */
+ if (psConnection->hClientTLStream)
+ {
+ TLStreamClose(psConnection->hClientTLStream);
+ psConnection->hClientTLStream = NULL;
+ PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream."));
+ }
+
+ /* Get process handle base to decrement the refcount */
+ psProcessHandleBase = psConnection->psProcessHandleBase;
+
+ if (psProcessHandleBase != NULL)
+ {
+		/* If the refcount drops to 0, the process handle base can be removed */
+ if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0)
+ {
+ uintptr_t uiHashValue;
+
+ OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+ uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid);
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+ if (!uiHashValue)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to remove handle base from hash table.",
+ __func__));
+ return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE;
+ }
+
+ eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ConnectionDataDestroy: Couldn't free handle base for process (%d)",
+ eError));
+ }
+
+ return eError;
+ }
+
+ OSFreeMem(psProcessHandleBase);
+ }
+
+ psConnection->psProcessHandleBase = NULL;
+ }
+
+ /* Free handle base for this connection */
+ if (psConnection->psHandleBase != NULL)
+ {
+ eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ConnectionDataDestroy: Couldn't free handle base for connection (%d)",
+ eError));
+ }
+
+ return eError;
+ }
+
+ psConnection->psHandleBase = NULL;
+ }
+
+ if (psConnection->psSyncConnectionData != NULL)
+ {
+ SyncUnregisterConnection(psConnection->psSyncConnectionData);
+ psConnection->psSyncConnectionData = NULL;
+ }
+
+ if (psConnection->psPDumpConnectionData != NULL)
+ {
+ PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+ psConnection->psPDumpConnectionData = NULL;
+ }
+
+ /* Call environment specific connection data deinit function */
+ if (psConnection->hOsPrivateData != NULL)
+ {
+ eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+				"ConnectionDataDestroy: OSConnectionPrivateDataDeInit failed (%d)",
+ eError));
+
+ return eError;
+ }
+
+ psConnection->hOsPrivateData = NULL;
+ }
+
+ OSFreeMem(psConnection);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData)
+{
+ CONNECTION_DATA *psConnection;
+ PVRSRV_ERROR eError;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Allocate connection data area */
+ psConnection = OSAllocZMem(sizeof(*psConnection));
+ if (psConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't allocate connection data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Call environment specific connection data init function */
+ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: OSConnectionPrivateDataInit failed (%d)",
+ eError));
+ goto failure;
+ }
+
+ psConnection->pid = OSGetCurrentClientProcessIDKM();
+
+ /* Register this connection with the sync core */
+ eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register the sync data"));
+ goto failure;
+ }
+
+ /*
+ * Register this connection with the pdump core. Pass in the sync connection data
+ * as it will be needed later when we only get passed in the PDump connection data.
+ */
+ eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+ &psConnection->psPDumpConnectionData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register the PDump data"));
+ goto failure;
+ }
+
+ /* Allocate handle base for this connection */
+ eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't allocate handle base for connection (%d)",
+ eError));
+ goto failure;
+ }
+
+ /* Try to get process handle base if it already exists */
+ OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+ psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+ psConnection->pid);
+
+	/* If there is none, allocate one */
+ if (psProcessHandleBase == NULL)
+ {
+ psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE));
+ if (psProcessHandleBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate handle base, oom.",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failureLock;
+ }
+
+ /* Allocate handle base for this process */
+ eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_PROCESS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Couldn't allocate handle base for process (%d)",
+ __func__,
+ eError));
+ OSFreeMem(psProcessHandleBase);
+ goto failureLock;
+ }
+
+ /* Insert the handle base into the global hash table */
+ if (!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+ psConnection->pid,
+ (uintptr_t) psProcessHandleBase))
+ {
+
+ eError = PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE;
+
+ PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0);
+
+ OSFreeMem(psProcessHandleBase);
+ goto failureLock;
+ }
+ }
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+ psConnection->psProcessHandleBase = psProcessHandleBase;
+
+ OSAtomicIncrement(&psProcessHandleBase->iRefCount);
+
+ /* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register process statistics (%d)",
+ eError));
+ goto failure;
+ }
+#endif
+
+ *ppvPrivData = psConnection;
+
+ return eError;
+
+failureLock:
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+failure:
+ ConnectionDataDestroy(psConnection);
+
+ return eError;
+}
+
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+ PVRSRV_ERROR eErrorConnection, eErrorKernel;
+ CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+ OSAcquireBridgeLock();
+
+ gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+ eErrorConnection = ConnectionDataDestroy(psConnectionData);
+ if (eErrorConnection != PVRSRV_OK)
+ {
+ if (eErrorConnection == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "_CleanupThreadPurgeConnectionData: Failed to purge connection data %p "
+ "(deferring destruction)",
+ psConnectionData));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "_CleanupThreadPurgeConnectionData: Connection data %p deferred destruction finished",
+ psConnectionData));
+ }
+
+	/* If possible, purge (resize down) the global handle base */
+ eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+ if (eErrorKernel != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_CleanupThreadPurgeConnectionData: Purge of global handle pool failed (%d)",
+ eErrorKernel));
+ }
+
+ gCurrentPurgeConnectionPid = 0;
+
+ OSReleaseBridgeLock();
+
+ return eErrorConnection;
+}
+
+void PVRSRVConnectionDisconnect(void *pvDataPtr)
+{
+ CONNECTION_DATA *psConnectionData = pvDataPtr;
+
+ /* Notify the PDump core if the pdump control client is disconnecting */
+ if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL)
+ {
+ PDumpDisconnectionNotify();
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+ {
+ /* Defer the release of the connection data */
+ psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+ psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+ psConnectionData->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+ psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+ PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+ }
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+ return gCurrentPurgeConnectionPid;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description API for server side connection management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_CONNECTION_SERVER_H_)
+#define _CONNECTION_SERVER_H_
+
+
+#include "img_types.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Timeout for the current time slice */
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Number of handle data entries freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Maximum time for which freeing of resources may hold the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */
+
+typedef struct _CONNECTION_DATA_
+{
+ PVRSRV_HANDLE_BASE *psHandleBase;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData;
+ struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData;
+
+ /* Holds the client flags supplied at connection time */
+ IMG_UINT32 ui32ClientFlags;
+
+ /*
+ * OS specific data can be stored via this handle.
+ * See osconnection_server.h for a generic mechanism
+ * for initialising this field.
+ */
+ IMG_HANDLE hOsPrivateData;
+
+ IMG_PID pid;
+
+ void *hSecureData;
+
+ IMG_HANDLE hProcessStats;
+
+ IMG_HANDLE hClientTLStream;
+
+ /* Structure which is hooked into the cleanup thread work list */
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+ /* List navigation for deferred freeing of connection data */
+ struct _CONNECTION_DATA_ **ppsThis;
+ struct _CONNECTION_DATA_ *psNext;
+} CONNECTION_DATA;
+
+#include "osconnection_server.h"
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData);
+void PVRSRVConnectionDisconnect(void *pvPrivData);
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+ return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetDevData)
+#endif
+static INLINE
+PVRSRV_DEVICE_NODE * PVRSRVGetDevData(CONNECTION_DATA *psConnection)
+{
+ return OSGetDevData(psConnection);
+}
+
+#endif /* !defined(_CONNECTION_SERVER_H_) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debug Driver
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description 32 Bit kernel mode debug driver
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma warning(disable:4201)
+#pragma warning(disable:4214)
+#pragma warning(disable:4115)
+#pragma warning(disable:4514)
+
+
+#include <ntddk.h>
+#include <windef.h>
+#include <winerror.h>
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#if defined (__QNXNTO__) || defined (INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma warning(default:4214)
+#pragma warning(default:4115)
+#endif /* _WIN32 */
+
+
+/******************************************************************************
+ Types
+******************************************************************************/
+
+/*
+ Per-buffer control structure.
+*/
+typedef struct _DBG_STREAM_
+{
+ struct _DBG_STREAM_* psNext;
+ struct _DBG_STREAM_* psInitStream;
+ struct _DBG_STREAM_* psDeinitStream;
+ IMG_UINT32 ui32Flags; /*!< flags (see DEBUG_FLAGS) */
+ void *pvBase;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32RPtr;
+ IMG_UINT32 ui32WPtr;
+
+ IMG_UINT32 ui32Marker; /*!< Size marker for file splitting */
+
+ IMG_UINT32 ui32InitPhaseWOff; /*!< snapshot offset for init phase end for follow-on pdump */
+
+ IMG_CHAR szName[DEBUG_STREAM_NAME_MAX]; /* Give this a size, some compilers don't like [] */
+} DBG_STREAM;
+
+/* Check 4xDBG_STREAM will fit in one page */
+static_assert((sizeof(DBG_STREAM) * 4) < HOST_PAGESIZE, "DBG_STREAM is too large");
+
+/******************************************************************************
+ Global variables
+******************************************************************************/
+
+static PDBG_STREAM g_psStreamList = 0;
+
+/* Mutex used to prevent UM threads (via the dbgdrv ioctl interface) and KM
+ * threads (from pvrsrvkm via the ExtDBG API) entering the debug driver core
+ * and changing the state of shared data at the same time.
+ */
+void * g_pvAPIMutex=NULL;
+
+static IMG_UINT32 g_PDumpCurrentFrameNo = 0;
+
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+ sizeof (DBGKM_SERVICE_TABLE),
+ ExtDBGDrivCreateStream,
+ ExtDBGDrivDestroyStream,
+ ExtDBGDrivWrite2,
+ ExtDBGDrivSetMarker,
+ ExtDBGDrivWaitForEvent,
+ ExtDBGDrivGetCtrlState,
+ ExtDBGDrivSetFrame
+};
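+
+/*
+ * This jump table is returned to the Services KM driver (pvrsrvkm) by
+ * DBGDrivGetServiceTable() below; routing calls through the Ext* wrappers
+ * serialises entry into the debug driver core on g_pvAPIMutex.
+ */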
+
+
+/***************************************************************************
+ Forward declarations
+***************************************************************************/
+
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+void IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void);
+void IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame);
+void DestroyAllStreams(void);
+
+/* Static function declarations */
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+static void InvalidateAllStreams(void);
+
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*!
+ @name ExtDBGDrivCreateStream
+ */
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit)
+{
+ IMG_BOOL pvRet;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivCreateStream(pszName, ui32Flags, ui32Size, phInit, phMain, phDeinit);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+/*!
+ @name ExtDBGDrivDestroyStream
+ */
+void IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDestroyStream(hInit, hMain, hDeinit);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+/*!
+ @name ExtDBGDrivFindStream
+ */
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ void * pvRet;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivFindStream(pszName, bResetStream);
+ if (pvRet == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ExtDBGDrivFindStream: Stream not found"));
+ }
+
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+/*!
+ @name ExtDBGDrivRead
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivRead(psStream, ui32BufID, ui32OutBuffSize, pui8OutBuf);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+/*!
+ @name ExtDBGDrivWrite2
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT32 ui32Ret;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+/*!
+ @name ExtDBGDrivSetMarker
+ */
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetMarker(psStream, ui32Marker);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+/*!
+ @name ExtDBGDrivGetMarker
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Marker;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Marker = DBGDrivGetMarker(psStream);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Marker;
+}
+
+/*!
+ @name ExtDBGDrivWaitForEvent
+ */
+void IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ DBGDrivWaitForEvent(eEvent);
+#else /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+ PVR_UNREFERENCED_PARAMETER(eEvent); /* PRQA S 3358 */
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+}
+
+
+/*!
+ @name ExtDBGDrivGetCtrlState
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+ IMG_UINT32 ui32State = 0;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32State = DBGDrivGetCtrlState(psStream, ui32StateID);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32State;
+}
+
+/*!
+ @name ExtDBGDrivGetFrame
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void)
+{
+ IMG_UINT32 ui32Frame = 0;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Frame = DBGDrivGetFrame();
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Frame;
+}
+
+/*!
+ @name ExtDBGDrivSetFrame
+ */
+void IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetFrame(ui32Frame);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+
+
+/*!****************************************************************************
+ @name AtoI
+ @brief Returns the integer value of a decimal or hexadecimal string
+ @param szIn - String holding the value; a leading "0x"/"0X" selects hex
+ @return IMG_UINT32 integer value, 0 if string is null or not valid
+ Based on Max's version; copes with hex words, upper or lower case a-f.
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+ IMG_INT iLen = 0;
+ IMG_UINT32 ui32Value = 0;
+ IMG_UINT32 ui32Digit=1;
+ IMG_UINT32 ui32Base=10;
+ IMG_INT iPos;
+ IMG_CHAR bc;
+
+ //get len of string
+ while (szIn[iLen] > 0)
+ {
+ iLen ++;
+ }
+
+ //nothing to do
+ if (iLen == 0)
+ {
+ return (0);
+ }
+
+ /* See if we have an 'x' or 'X' before the number to make it a hex number */
+ iPos=0;
+ while (szIn[iPos] == '0')
+ {
+ iPos++;
+ }
+ if (szIn[iPos] == '\0')
+ {
+ return 0;
+ }
+ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+ {
+ ui32Base=16;
+ szIn[iPos]='0';
+ }
+
+ //go through string from right (least significant) to left
+ for (iPos = iLen - 1; iPos >= 0; iPos --)
+ {
+ bc = szIn[iPos];
+
+ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16) //handle lower case a-f
+ {
+ bc -= 'a' - 0xa;
+ }
+ else
+ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16) //handle upper case A-F
+ {
+ bc -= 'A' - 0xa;
+ }
+ else
+ if ((bc >= '0') && (bc <= '9')) //if char out of range, return 0
+ {
+ bc -= '0';
+ }
+ else
+ return (0);
+
+ ui32Value += (IMG_UINT32)bc * ui32Digit;
+
+ ui32Digit = ui32Digit * ui32Base;
+ }
+ return (ui32Value);
+}
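+
+/*
+ For illustration (values follow from the conversion loop above):
+
+     AtoI("256")  == 256    decimal parse
+     AtoI("0x1A") == 26     leading "0x"/"0X" selects base 16
+     AtoI("12G")  == 0      any out-of-range character rejects the whole string
+
+ Note that the hex case rewrites the 'x'/'X' in place, so the argument must be
+ a writable buffer rather than a string literal.
+*/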
+
+
+/*!****************************************************************************
+ @name StreamValid
+ @brief Validates supplied debug buffer.
+ @param psStream - debug stream
+ @return true if valid
+*****************************************************************************/
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psThis;
+
+ psThis = g_psStreamList;
+
+ while (psThis)
+ {
+ if (psStream && ((psThis == psStream) ||
+ (psThis->psInitStream == psStream) ||
+ (psThis->psDeinitStream == psStream)) )
+ {
+ return(IMG_TRUE);
+ }
+ else
+ {
+ psThis = psThis->psNext;
+ }
+ }
+
+ return(IMG_FALSE);
+}
+
+
+/*!****************************************************************************
+ @name StreamValidForRead
+ @brief Validates supplied debug buffer for read op.
+ @param psStream - debug stream
+ @return true if readable
+*****************************************************************************/
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name StreamValidForWrite
+ @brief Validates supplied debug buffer for write op.
+ @param psStream - debug stream
+ @return true if writable
+*****************************************************************************/
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name Write
+ @brief Copies data from a buffer into selected stream. Stream size is fixed.
+ @param psStream - stream for output
+ @param pui8Data - input buffer
+ @param ui32InBuffSize - size of input
+ @return none
+*****************************************************************************/
+static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+ /*
+ Split copy into two bits as necessary (if we're allowed to wrap).
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) == 0)
+ {
+ PVR_ASSERT( (psStream->ui32WPtr + ui32InBuffSize) < psStream->ui32Size );
+ }
+
+ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+ {
+ /* Yes we need two bits, calculate their sizes */
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+ /* Copy first block to current location */
+ HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+ (void *) pui8Data,
+ ui32B1);
+
+ /* Copy second block to start of buffer */
+ HostMemCopy(psStream->pvBase,
+ (void *)(pui8Data + ui32B1),
+ ui32B2);
+
+ /* Set pointer to be the new end point */
+ psStream->ui32WPtr = ui32B2;
+ }
+ else
+ { /* Can fit block in single chunk */
+ HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+ (void *) pui8Data,
+ ui32InBuffSize);
+
+ psStream->ui32WPtr += ui32InBuffSize;
+
+ if (psStream->ui32WPtr == psStream->ui32Size)
+ {
+ psStream->ui32WPtr = 0;
+ }
+ }
+}
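+
+/*
+ Worked example of the wrapping path above (illustrative numbers): with
+ ui32Size = 0x1000, ui32WPtr = 0xF00 and ui32InBuffSize = 0x200, the copy is
+ split into ui32B1 = 0x100 (written at offset 0xF00 up to the end of the
+ buffer) and ui32B2 = 0x100 (written at offset 0), leaving ui32WPtr = 0x100.
+*/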
+
+
+/*!****************************************************************************
+ @name WriteExpandingBuffer
+ @brief Copies data from a buffer into selected stream. Stream size may be expandable.
+ @param psStream - stream for output
+ @param pui8InBuf - input buffer
+ @param ui32InBuffSize - size of input
+ @return bytes copied
+*****************************************************************************/
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT ui32Space;
+
+ /*
+ How much space have we got in the buffer ?
+ */
+ ui32Space = SpaceInStream(psStream);
+
+ /*
+ Check if we can expand the buffer
+ */
+ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+ {
+ /*
+		 Don't do anything if we've got less than 32 bytes of space and
+ we're not allowing expansion of buffer space...
+ */
+ if (ui32Space < 32)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is full and isn't expandable", psStream));
+ return(0);
+ }
+ }
+ else
+ {
+ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+ {
+ IMG_UINT32 ui32NewBufSize;
+
+ /*
+ Find new buffer size, double the current size or increase by 1MB
+ */
+ ui32NewBufSize = MIN(psStream->ui32Size<<1,psStream->ui32Size+(1<<20));
+ ui32NewBufSize = MIN(ui32NewBufSize, PDUMP_STREAMBUF_MAX_SIZE_MB<<20);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+ psStream->ui32Size, ui32NewBufSize));
+
+ if (ui32InBuffSize > psStream->ui32Size)
+ {
+ ui32NewBufSize += ui32InBuffSize;
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is expanding by size of input buffer %u", psStream, ui32NewBufSize));
+ }
+
+ /*
+ Attempt to expand the buffer
+ */
+ if ((ui32NewBufSize < psStream->ui32Size) ||
+ !ExpandStreamBuffer(psStream,ui32NewBufSize))
+ {
+ if (ui32Space < 32)
+ {
+ if((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ return(0);
+ }
+ else
+ {
+ /* out of memory */
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: Unable to expand %p. Out of memory.", psStream));
+ InvalidateAllStreams();
+ return (0xFFFFFFFFUL);
+ }
+ }
+ }
+
+ /*
+ Recalc the space in the buffer
+ */
+ ui32Space = SpaceInStream(psStream);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+ ui32Space));
+ }
+ }
+
+ /*
+ Only copy what we can..
+ */
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 4;
+ }
+
+ /*
+ Write the stuff...
+ */
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
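+
+/*
+ Illustrative sizing: a 1MB stream that needs to grow is first doubled to 2MB
+ (MIN(size << 1, size + 1MB) == 2MB); once the stream exceeds 1MB it grows in
+ 1MB steps, and growth is always capped at PDUMP_STREAMBUF_MAX_SIZE_MB.
+*/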
+
+/*****************************************************************************
+******************************************************************************
+******************************************************************************
+ THE ACTUAL FUNCTIONS
+******************************************************************************
+******************************************************************************
+*****************************************************************************/
+
+static void DBGDrivSetStreamName(PDBG_STREAM psStream,
+ IMG_CHAR* pszBase,
+ IMG_CHAR* pszExt)
+{
+ IMG_CHAR* pCh = psStream->szName;
+ IMG_CHAR* pChEnd = psStream->szName+DEBUG_STREAM_NAME_MAX-8;
+ IMG_CHAR* pSrcCh;
+ IMG_CHAR* pSrcChEnd;
+
+ for (pSrcCh = pszBase, pSrcChEnd = pszBase+strlen(pszBase);
+ (pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+ pSrcCh++, pCh++)
+ {
+ *pCh = *pSrcCh;
+ }
+
+ for (pSrcCh = pszExt, pSrcChEnd = pszExt+strlen(pszExt);
+ (pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+ pSrcCh++, pCh++)
+ {
+ *pCh = *pSrcCh;
+ }
+
+ *pCh = '\0';
+}
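+
+/*
+ For example (hypothetical base name), DBGDrivSetStreamName(psStream,
+ "MainStream", "_Init") produces "MainStream_Init"; the combined name is
+ truncated at DEBUG_STREAM_NAME_MAX - 8 characters before being terminated.
+*/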
+
+/*!****************************************************************************
+ @name DBGDrivCreateStream
+ @brief Creates a pdump/debug stream
+ @param pszName - stream name
+ @param ui32Flags - output flags, text stream bit is set for pdumping
+ @param ui32Size - size of stream buffer in pages
+ @return IMG_TRUE if the stream tuple was created, IMG_FALSE otherwise
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_HANDLE* phInit,
+ IMG_HANDLE* phMain,
+ IMG_HANDLE* phDeinit)
+{
+ IMG_BOOL bUseNonPagedMem4Buffers = ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0);
+ PDBG_STREAM psStream = NULL;
+ PDBG_STREAM psInitStream = NULL;
+ PDBG_STREAM psStreamDeinit = NULL;
+ void* pvBase = NULL;
+
+ /*
+ If we already have a buffer using this name just return
+ its handle.
+ */
+ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+ if (psStream)
+ {
+ *phInit = psStream->psInitStream;
+ *phMain = psStream;
+ *phDeinit = psStream->psDeinitStream;
+ return IMG_TRUE;
+ }
+
+ /*
+ Allocate memory for control structures
+ */
+ psStream = HostNonPageablePageAlloc(1);
+ if (!psStream)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
+ goto errCleanup;
+ }
+ psInitStream = psStream+1;
+ psStreamDeinit = psStream+2;
+
+
+ /* Allocate memory for Main buffer */
+ psStream->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /*
+ Setup debug buffer state.
+ */
+ psStream->psNext = 0;
+ psStream->pvBase = pvBase;
+ psStream->ui32Flags = ui32Flags | DEBUG_FLAGS_CIRCULAR;
+ psStream->ui32Size = ui32Size * HOST_PAGESIZE;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32Marker = 0;
+ psStream->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psStream, pszName, "");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with main name (%s)\n\r", psStream->szName));
+
+ /* Allocate memory for Init buffer */
+ psInitStream->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /* Initialise the stream for the Init phase */
+ psInitStream->psNext = psInitStream->psInitStream = psInitStream->psDeinitStream = NULL;
+ psInitStream->ui32Flags = ui32Flags;
+ psInitStream->pvBase = pvBase;
+ psInitStream->ui32Size = ui32Size * HOST_PAGESIZE;
+ psInitStream->ui32RPtr = 0;
+ psInitStream->ui32WPtr = 0;
+ psInitStream->ui32Marker = 0;
+ psInitStream->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psInitStream, pszName, "_Init");
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with init name (%s)\n\r", psInitStream->szName));
+ psStream->psInitStream = psInitStream;
+
+ /* Allocate memory for Deinit buffer */
+ psStreamDeinit->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(1);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(1);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc DeinitStream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /* Initialise the stream for the Deinit phase */
+ psStreamDeinit->psNext = psStreamDeinit->psInitStream = psStreamDeinit->psDeinitStream = NULL;
+ psStreamDeinit->pvBase = pvBase;
+ psStreamDeinit->ui32Flags = ui32Flags;
+ psStreamDeinit->ui32Size = HOST_PAGESIZE;
+ psStreamDeinit->ui32RPtr = 0;
+ psStreamDeinit->ui32WPtr = 0;
+ psStreamDeinit->ui32Marker = 0;
+ psStreamDeinit->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psStreamDeinit, pszName, "_Deinit");
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStreamDeinit->szName));
+
+ psStream->psDeinitStream = psStreamDeinit;
+
+ /*
+ Insert into list.
+ */
+ psStream->psNext = g_psStreamList;
+ g_psStreamList = psStream;
+
+ AddSIDEntry(psStream);
+
+ *phInit = psStream->psInitStream;
+ *phMain = psStream;
+ *phDeinit = psStream->psDeinitStream;
+
+ return IMG_TRUE;
+
+errCleanup:
+ if (bUseNonPagedMem4Buffers)
+ {
+ if (psStream) HostNonPageablePageFree(psStream->pvBase);
+ if (psInitStream) HostNonPageablePageFree(psInitStream->pvBase);
+ if (psStreamDeinit) HostNonPageablePageFree(psStreamDeinit->pvBase);
+ }
+ else
+ {
+ if (psStream) HostPageablePageFree(psStream->pvBase);
+ if (psInitStream) HostPageablePageFree(psInitStream->pvBase);
+ if (psStreamDeinit) HostPageablePageFree(psStreamDeinit->pvBase);
+ }
+ HostNonPageablePageFree(psStream);
+ psStream = psInitStream = psStreamDeinit = NULL;
+ return IMG_FALSE;
+}
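+
+/*
+ Note on the layout above: the Main, Init and Deinit DBG_STREAM structures are
+ carved out of a single non-pageable page (psStream, psStream+1, psStream+2),
+ which is why the static_assert near the top of this file checks that four
+ DBG_STREAM structures fit in HOST_PAGESIZE.
+*/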
+
+/*!****************************************************************************
+ @name DBGDrivDestroyStream
+ @brief Delete a stream and free its memory
+ @param hInit, hMain, hDeinit - handles of the stream tuple to be removed
+ @return none
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+ PDBG_STREAM psStreamInit = (PDBG_STREAM) hInit;
+ PDBG_STREAM psStream = (PDBG_STREAM) hMain;
+ PDBG_STREAM psStreamDeinit = (PDBG_STREAM) hDeinit;
+ PDBG_STREAM psStreamThis;
+ PDBG_STREAM psStreamPrev;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ RemoveSIDEntry(psStream);
+
+ /*
+ Remove from linked list.
+ */
+ psStreamThis = g_psStreamList;
+ psStreamPrev = 0;
+
+ while (psStreamThis)
+ {
+ if (psStreamThis == psStream)
+ {
+ if (psStreamPrev)
+ {
+ psStreamPrev->psNext = psStreamThis->psNext;
+ }
+ else
+ {
+ g_psStreamList = psStreamThis->psNext;
+ }
+
+ psStreamThis = 0;
+ }
+ else
+ {
+ psStreamPrev = psStreamThis;
+ psStreamThis = psStreamThis->psNext;
+ }
+ }
+
+ /*
+ And free its memory.
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ HostNonPageablePageFree(psStreamInit->pvBase);
+ HostNonPageablePageFree(psStreamDeinit->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ HostPageablePageFree(psStreamInit->pvBase);
+ HostPageablePageFree(psStreamDeinit->pvBase);
+ }
+
+ /* Free the shared page used for the three stream tuple */
+ HostNonPageablePageFree(psStream);
+ psStream = psStreamInit = psStreamDeinit = NULL;
+
+ if (g_psStreamList == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+ }
+
+ return;
+}
+
+/*!****************************************************************************
+ @name DBGDrivFindStream
+ @brief Finds/resets a named stream
+ @param pszName - stream name
+ @param bResetStream - whether to reset the stream, e.g. to end pdump init phase
+ @return stream handle, NULL if stream not found
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ PDBG_STREAM psStream;
+ PDBG_STREAM psThis;
+ IMG_UINT32 ui32Off;
+ IMG_BOOL bAreSame;
+
+ psStream = 0;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+ pszName,
+ (bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+ /*
+ Scan buffer names for supplied one.
+ */
+ for (psThis = g_psStreamList; psThis != NULL; psThis = psThis->psNext)
+ {
+ bAreSame = IMG_TRUE;
+ ui32Off = 0;
+
+ if (strlen(psThis->szName) == strlen(pszName))
+ {
+ while ((ui32Off < DEBUG_STREAM_NAME_MAX) && (psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && bAreSame)
+ {
+ if (psThis->szName[ui32Off] != pszName[ui32Off])
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ ui32Off++;
+ }
+ }
+ else
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ if (bAreSame)
+ {
+ psStream = psThis;
+ break;
+ }
+ }
+
+ if(psStream)
+ {
+ psStream->psInitStream->ui32RPtr = 0;
+ psStream->psDeinitStream->ui32RPtr = 0;
+ psStream->ui32RPtr = 0;
+ if (bResetStream)
+ {
+ /* This will erase any data written to the main stream
+ * before the client starts. */
+ psStream->ui32WPtr = 0;
+ }
+ psStream->ui32Marker = psStream->psInitStream->ui32Marker = 0;
+
+
+		/* Mark the init stream to prevent further reading by the PDump client */
+		/* Note: possible race condition with a concurrent writer updating ui32WPtr */
+ psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+
+		PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker to %x",
+ psStream->szName,
+ psStream->psInitStream->ui32InitPhaseWOff));
+ }
+
+ return((void *) psStream);
+}
+
+static void IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+ IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32Off = 0;
+ IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+ IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s", psStream->szName ));
+
+ /*
+ Validate buffer.
+ */
+ /*
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+*/
+ /* Write what we can of the error message */
+ ui32Space = SpaceInStream(psStream);
+
+ /* Make sure there's space for termination character */
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+ }
+
+ while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+ ui32Off++;
+ ui32WPtr++;
+ }
+ pui8Buffer[ui32WPtr++] = '\0';
+ psStream->ui32WPtr = ui32WPtr;
+
+ /* Buffer will accept no more params from Services/client driver */
+ psStream->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+/*!****************************************************************************
+ @name InvalidateAllStreams
+ @brief invalidate all streams in list
+ @return none
+*****************************************************************************/
+static void InvalidateAllStreams(void)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ while (psStream != NULL)
+ {
+ DBGDrivInvalidateStream(psStream);
+ DBGDrivInvalidateStream(psStream->psInitStream);
+ DBGDrivInvalidateStream(psStream->psDeinitStream);
+ psStream = psStream->psNext;
+ }
+ return;
+}
+
+/*!****************************************************************************
+ @name DBGDrivWrite2
+ @brief Copies data from a buffer into selected (expandable) stream.
+ @param psStream - stream for output
+ @param pui8InBuf - input buffer
+ @param ui32InBuffSize - size of input
+ @return bytes copied, 0 on recoverable error, 0xFFFFFFFF on unrecoverable error
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValidForWrite(psStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+ return(0xFFFFFFFFUL);
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+/*!****************************************************************************
+ @name DBGDrivRead
+ @brief Read from debug driver buffers
+ @param psMainStream - stream
+ @param ui32BufID - one of the DEBUG_READ_BUFID flags to indicate which buffer
+ @param ui32OutBuffSize - available space in client buffer
+ @param pui8OutBuf - output buffer
+ @return bytes read, 0 if failure occurred
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Data;
+ DBG_STREAM *psStream;
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValidForRead(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %p is invalid", psMainStream));
+ return(0);
+ }
+
+ if(ui32BufID == DEBUG_READ_BUFID_INIT)
+ {
+ psStream = psMainStream->psInitStream;
+ }
+ else if (ui32BufID == DEBUG_READ_BUFID_DEINIT)
+ {
+ psStream = psMainStream->psDeinitStream;
+ }
+ else
+ {
+ psStream = psMainStream;
+ }
+
+ /* Don't read beyond the init phase marker point */
+ if (psStream->ui32RPtr == psStream->ui32WPtr ||
+ ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+ {
+ return(0);
+ }
+
+ /*
+ Get amount of data in buffer.
+ */
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+ }
+ else
+ {
+ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+ }
+
+ /*
+ Don't read beyond the init phase marker point
+ */
+ if ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+ {
+ ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+ }
+
+ /*
+ Only transfer what target buffer can handle.
+ */
+ if (ui32Data > ui32OutBuffSize)
+ {
+ ui32Data = ui32OutBuffSize;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+ ui32Data,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ /*
+ Split copy into two bits or one depending on W/R position.
+ */
+ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+ { /* Calc block 1 and block 2 sizes */
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+ /* Copy up to end of circular buffer */
+ HostMemCopy((void *) pui8OutBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32B1);
+
+ /* Copy from start of circular buffer */
+ HostMemCopy((void *)(pui8OutBuf + ui32B1),
+ psStream->pvBase,
+ ui32B2);
+
+ /* Update read pointer now that we've copied the data out */
+ psStream->ui32RPtr = ui32B2;
+ }
+ else
+ { /* Copy data from wherever */
+ HostMemCopy((void *) pui8OutBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32Data);
+
+ /* Update read pointer now that we've copied the data out */
+ psStream->ui32RPtr += ui32Data;
+
+ /* Check for wrapping */
+ if ((psStream->ui32RPtr != psStream->ui32WPtr) &&
+ (psStream->ui32RPtr >= psStream->ui32Size))
+ {
+ psStream->ui32RPtr = 0;
+ }
+ }
+
+ return(ui32Data);
+}
+
+/*!****************************************************************************
+ @name DBGDrivSetMarker
+ @brief Sets the marker in the stream to split output files
+ @param psStream, ui32Marker
+ @return nothing
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+ /*
+ Validate buffer
+ */
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ /* Called by PDump client to reset the marker to zero after a file split */
+ if ((ui32Marker == 0) && (psStream->ui32Marker == 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Client resetting marker that is already zero!"));
+ }
+ /* Called by pvrsrvkm to set the marker to signal a file split is required */
+ if ((ui32Marker != 0) && (psStream->ui32Marker != 0))
+ {
+ /* In this case a previous split request is still outstanding. The
+ * client has not yet actioned and acknowledged the previous
+ * marker. This may be an error if the client does not catch-up and
+ * the stream's written data is allowed to pass the max file
+ * size again. If this happens the PDump is invalid as the offsets
+ * from the script file will be incorrect.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Server setting marker that is already set!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDrivSetMarker: Setting stream split marker to %d (was %d)", ui32Marker, psStream->ui32Marker));
+ }
+
+ psStream->ui32Marker = ui32Marker;
+}
+
+/*!****************************************************************************
+ @name DBGDrivGetMarker
+ @brief Gets the marker in the stream to split output files
+ @param psStream - stream
+ @return marker offset
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ /*
+ Validate buffer
+ */
+ if (!StreamValid(psStream))
+ {
+ return 0;
+ }
+
+ return psStream->ui32Marker;
+}
+
+/*!****************************************************************************
+ @name DBGDrivGetServiceTable
+ @brief get jump table for Services driver
+ @return pointer to jump table
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivGetServiceTable(void)
+{
+ return &g_sDBGKMServices;
+}
+
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+/*!****************************************************************************
+ @name DBGDrivWaitForEvent
+ @brief waits for an event
+ @param eEvent - debug driver event
+ @return void
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+ HostWaitForEvent(eEvent);
+}
+#endif
+
+/*!****************************************************************************
+ @name DBGDrivGetCtrlState
+ @brief Gets a state value from the debug driver or stream
+ @param psStream - stream
+ @param ui32StateID - state ID
+ @return State value, or 0xFFFFFFFF if the stream is invalid or the state ID is unknown
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+ /* Validate buffer */
+ if (!StreamValid(psStream))
+ {
+ return (0xFFFFFFFF);
+ }
+
+ /* Retrieve the state asked for */
+ switch (ui32StateID)
+ {
+ case DBG_GET_STATE_FLAG_IS_READONLY:
+ return ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) != 0);
+
+ case 0xFE: /* Dump the current stream state */
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+ psStream, psStream->szName, psStream->ui32Flags));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->pvBase( %p ) psStream->ui32Size( %u )",
+ psStream->pvBase, psStream->ui32Size));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->ui32RPtr( %u ) psStream->ui32WPtr( %u )",
+ psStream->ui32RPtr, psStream->ui32WPtr));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->ui32Marker( %u ) psStream->ui32InitPhaseWOff( %u )",
+ psStream->ui32Marker, psStream->ui32InitPhaseWOff));
+ if (psStream->psInitStream)
+ {
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+ psStream->psInitStream, psStream->psInitStream->szName, psStream->ui32Flags));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->pvBase( %p ) psInitStream->ui32Size( %u )",
+ psStream->psInitStream->pvBase, psStream->psInitStream->ui32Size));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->ui32RPtr( %u ) psInitStream->ui32WPtr( %u )",
+ psStream->psInitStream->ui32RPtr, psStream->psInitStream->ui32WPtr));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->ui32Marker( %u ) psInitStream->ui32InitPhaseWOff( %u ) ",
+ psStream->psInitStream->ui32Marker, psStream->psInitStream->ui32InitPhaseWOff));
+ }
+
+ break;
+
+ case 0xFF: /* Dump driver state not in a stream */
+ {
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: g_psStreamList( head %p ) g_pvAPIMutex( %p ) g_PDumpCurrentFrameNo( %u )",
+ g_psStreamList, g_pvAPIMutex, g_PDumpCurrentFrameNo));
+ }
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ }
+
+ return (0xFFFFFFFF);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void)
+{
+ return g_PDumpCurrentFrameNo;
+}
+
+void IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+ g_PDumpCurrentFrameNo = ui32Frame;
+}
+
+
+/*!****************************************************************************
+ @name ExpandStreamBuffer
+ @brief allocates a new buffer when the current one is full
+ @param psStream - stream
+ @param ui32NewSize - new size
+ @return IMG_TRUE - if allocation succeeded, IMG_FALSE - if not
+*****************************************************************************/
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+ void * pvNewBuf;
+ IMG_UINT32 ui32NewSizeInPages;
+ IMG_UINT32 ui32NewWOffset;
+ IMG_UINT32 ui32NewROffset;
+ IMG_UINT32 ui32SpaceInOldBuf;
+
+ /*
+ First check new size is bigger than existing size
+ */
+ if (psStream->ui32Size >= ui32NewSize)
+ {
+ return IMG_FALSE;
+ }
+
+ /*
+ Calc space in old buffer
+ */
+ ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+ /*
+ Allocate new buffer
+ */
+ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+ }
+ else
+ {
+ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+ }
+
+ if (pvNewBuf == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ /*
+ Copy the old buffer over to the new one; data is placed at the start of
+ the new buffer even if the read offset is not at the start of the old one
+ */
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ /*
+ No wrapping of data so copy data to start of new buffer
+ */
+ HostMemCopy(pvNewBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ psStream->ui32WPtr - psStream->ui32RPtr);
+ }
+ else
+ {
+ IMG_UINT32 ui32FirstCopySize;
+
+ /*
+ The data has wrapped around the buffer, copy beginning of buffer first
+ */
+ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+
+ HostMemCopy(pvNewBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32FirstCopySize);
+
+ /*
+ Now second half
+ */
+ HostMemCopy((void *)((uintptr_t)pvNewBuf + ui32FirstCopySize),
+ (void *)(IMG_PBYTE)psStream->pvBase,
+ psStream->ui32WPtr);
+ }
+ ui32NewROffset = 0;
+ }
+ else
+ {
+ /* Copy everything in the old buffer to the new one */
+ HostMemCopy(pvNewBuf, psStream->pvBase, psStream->ui32WPtr);
+ ui32NewROffset = psStream->ui32RPtr;
+ }
+
+ /*
+ New Write offset is at end of data
+ */
+ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+ /*
+ Free old buffer
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ }
+
+ /*
+ Now set new params up
+ */
+ psStream->pvBase = pvNewBuf;
+ psStream->ui32RPtr = ui32NewROffset;
+ psStream->ui32WPtr = ui32NewWOffset;
+ psStream->ui32Size = ui32NewSizeInPages * 4096;
+
+ return IMG_TRUE;
+}
+
+/*!****************************************************************************
+ @name SpaceInStream
+ @brief remaining space in stream
+ @param psStream - stream
+ @return bytes remaining
+*****************************************************************************/
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Space;
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ /* Allow overwriting the buffer which was already read */
+ if (psStream->ui32RPtr > psStream->ui32WPtr)
+ {
+ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+ }
+ else
+ {
+ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+ }
+ }
+ else
+ {
+ /* Don't overwrite anything */
+ ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+ }
+
+ return ui32Space;
+}
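+
+/* Example: for a circular stream with ui32Size == 0x1000, ui32WPtr == 0xF00
+ * and ui32RPtr == 0x100, SpaceInStream() returns
+ * 0x100 + (0x1000 - 0xF00) == 0x200 bytes, since already-read data may be
+ * overwritten; a non-circular stream with the same pointers has only
+ * 0x1000 - 0xF00 == 0x100 bytes left.
+ */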
+
+
+/*!****************************************************************************
+ @name DestroyAllStreams
+ @brief delete all streams in list
+ @return none
+*****************************************************************************/
+void DestroyAllStreams(void)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ PDBG_STREAM psStreamToFree;
+
+ while (psStream != NULL)
+ {
+ psStreamToFree = psStream;
+ psStream = psStream->psNext;
+ DBGDrivDestroyStream(psStreamToFree->psInitStream, psStreamToFree, psStreamToFree->psDeinitStream);
+ }
+ g_psStreamList = NULL;
+ return;
+}
+
+/******************************************************************************
+ End of file (DBGDRIV.C)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+/*****************************************************************************
+ The odd constant or two
+*****************************************************************************/
+
+#define DBGDRIV_VERSION 0x100
+#define MAX_PROCESSES 2
+#define BLOCK_USED 0x01
+#define BLOCK_LOCKED 0x02
+#define DBGDRIV_MONOBASE 0x000B0000
+
+
+/*****************************************************************************
+ * OS-specific declarations and init/cleanup functions
+*****************************************************************************/
+extern void * g_pvAPIMutex;
+
+extern IMG_INT dbgdrv_init(void);
+extern void dbgdrv_cleanup(void);
+
+/*****************************************************************************
+ Internal debug driver core functions
+*****************************************************************************/
+/* Called by WDDM debug driver win7/hostfunc.c */
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages,
+ IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+
+/* Called by Linux debug driver main.c to allow the API mutex lock to be used
+ * to protect the common IOCTL read buffer while avoiding deadlock in the Ext
+ * layer
+ */
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID,
+ IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+
+/* Used in ioctl.c in DBGDIOCDrivGetServiceTable() which is called in WDDM PDump files */
+void * IMG_CALLCONV DBGDrivGetServiceTable(void);
+
+/* Used in WDDM version of debug driver win7/main.c */
+void DestroyAllStreams(void);
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+void HostMemSet(void *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+void HostMemCopy(void *pvDest,void *pvSrc,IMG_UINT32 ui32Size);
+
+/*****************************************************************************
+ Secure handle Function prototypes
+*****************************************************************************/
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream);
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+/*****************************************************************************
+ Declarations for IOCTL Service table and KM table entry points
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+void IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void);
+void IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame);
+
+#endif
+
+/*****************************************************************************
+ End of file (DBGDRIV.H)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide resource handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+
+/* max number of streams held in SID info table */
+#define MAX_SID_ENTRIES 8
+
+typedef struct _SID_INFO
+{
+ PDBG_STREAM psStream;
+} SID_INFO, *PSID_INFO;
+
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ /* idx is one based */
+ return (IMG_SID)iIdx+1;
+ }
+ }
+ }
+
+ return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+ /* changed to zero based */
+ IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+ if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+ {
+ return gaSID_Xlat_Table[iIdx].psStream;
+ }
+ else
+ {
+ return (PDBG_STREAM)NULL;
+ }
+}
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ /* already created */
+ return IMG_TRUE;
+ }
+
+ if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)NULL)
+ {
+ /* free entry */
+ gaSID_Xlat_Table[iIdx].psStream = psStream;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)NULL;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
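+
+/* Usage sketch for the SID table above: a stream is registered with
+ * AddSIDEntry() when it is created, exposed to user mode as the one-based
+ * handle returned by PStream2SID(), translated back on each ioctl with
+ * SID2PStream(), and removed with RemoveSIDEntry() when the stream is
+ * destroyed. At most MAX_SID_ENTRIES streams can be tracked at once; lookups
+ * on unknown streams fail gracefully (0 / NULL / IMG_FALSE).
+ */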
+
+
+/******************************************************************************
+ End of file (handle.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_IOCTL_H_
+#define _DBGDRIV_IOCTL_H_
+
+#include "dbgdrvif_srv5.h"
+
+
+/* Share this debug driver global with the OS layer so that IOCTL calls
+ * coming from the OS enter the common table of entry points.
+ */
+extern IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL);
+
+
+#endif /* _DBGDRIV_IOCTL_H_ */
+
+/*****************************************************************************
+ End of file
+ *****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debug driver for Services 5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Debug Driver Interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRVIF_SRV5_
+#define _DBGDRVIF_SRV5_
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4200)
+#endif
+
+#if defined(__linux__)
+
+#define FILE_DEVICE_UNKNOWN 0
+#define METHOD_BUFFERED 0
+#define FILE_ANY_ACCESS 0
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) (Function)
+#define MAKEIOCTLINDEX(i) ((i) & 0xFFF)
+
+#else
+
+#include "ioctldef.h"
+
+#endif
+
+#include "img_defs.h"
+
+
+/*****************************************************************************
+ Stream mode stuff.
+*****************************************************************************/
+#define DEBUG_CAPMODE_FRAMED 0x00000001UL /* Default capture mode, set when streams are created */
+#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL /* Only used in WDDM; streams are created with this capture mode */
+
+#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL /* Only set in WDDM */
+#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
+#define DEBUG_FLAGS_READONLY 0x00000008UL
+#define DEBUG_FLAGS_WRITEONLY 0x00000010UL
+#define DEBUG_FLAGS_CIRCULAR 0x00000020UL
+
+/* Stream name maximum length */
+#define DEBUG_STREAM_NAME_MAX 32
+
+/*****************************************************************************
+ IOCTL values.
+*****************************************************************************/
+/* IOCTL values defined here so that the Windows-based OS layer of PDump
+ in the server can access the GetServiceTable method.
+ */
+#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
+#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WAITFOREVENT CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#if defined(__QNXNTO__)
+#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_MAX_API 8
+#else
+#define DEBUG_SERVICE_MAX_API 9
+#endif
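+
+/* Note: on Linux the CTL_CODE() macro above reduces to the bare function
+ * number, so the ioctl values are simply DEBUG_SERVICE_IOCTL_BASE plus the
+ * index, e.g. DEBUG_SERVICE_GETSERVICETABLE == 0x801 and
+ * DEBUG_SERVICE_READ == 0x803.
+ */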
+
+
+#if defined(_WIN32)
+/*****************************************************************************
+ Debug driver device name
+*****************************************************************************/
+#if defined (DBGDRV_MODULE_NAME)
+#define REGISTRY_PATH_TO_DEBUG_DRIVER \
+ L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_DEVICE_NAME L"\\Device\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_SYMLINK L"\\DosDevices\\" DBGDRV_MODULE_NAME
+#else
+#error Debug driver name must be specified
+/*
+#define DBGDRV_NT_DEVICE_NAME L"\\Device\\VLDbgDrv"
+#define DBGDRV_NT_SYMLINK L"\\DosDevices\\VLDBGDRV"
+*/
+#endif
+
+/* symbolic link name */
+#define DBGDRV_WIN32_DEVICE_NAME "\\\\.\\VLDBGDRV"
+
+#define DBGDRV_WINCE_DEVICE_NAME L"DBD1:"
+#endif
+
+/* A pointer type which is at least 64 bits wide. The fixed width ensures
+ * consistency in structures between 32 and 64-bit code.
+ * The UM code (be it 32 or 64 bit) can simply write to the native pointer type (pvPtr).
+ * 64-bit KM code must read ui32Ptr in the case of a 32-bit client; otherwise it
+ * can just read pvPtr when the client is also 64-bit.
+ *
+ * ui64Ptr ensures the union is 64-bits wide in a 32-bit client.
+ *
+ * The union is explicitly 64-bit aligned as it was found gcc on x32 only
+ * aligns it to 32-bit, as the ABI permits aligning 64-bit types to a 32-bit
+ * boundary.
+ */
+typedef union
+{
+ /* native pointer type for UM to write to */
+ void *pvPtr;
+ /* the pointer written by a 32-bit client */
+ IMG_UINT32 ui32Ptr;
+ /* force the union width */
+ IMG_UINT64 ui64Ptr;
+} DBG_WIDEPTR __aligned(8);
+
+/* Helper macro for dbgdriv (KM) to get the pointer value from the WIDEPTR type,
+ * depending on whether the client is 32 or 64-bit.
+ *
+ * Note: a double cast is required to avoid the
+ * 'cast to pointer from integer of different size' warning;
+ * this is done by first casting to an integer type.
+ */
+
+#if defined(CONFIG_COMPAT)
+#define WIDEPTR_GET_PTR(p, bCompat) (bCompat ? \
+ (void *) (uintptr_t) (p).ui32Ptr : \
+ (p).pvPtr)
+#else
+#define WIDEPTR_GET_PTR(p, bCompat) (p).pvPtr
+#endif
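+
+/* Usage sketch (kernel side, names illustrative): given a DBG_WIDEPTR sWidePtr
+ * received from user mode, the driver extracts a usable pointer with e.g.
+ *
+ *     IMG_UINT8 *pui8Buf = WIDEPTR_GET_PTR(sWidePtr, bCompat);
+ *
+ * where bCompat is IMG_TRUE only when a 64-bit kernel is servicing a 32-bit
+ * client (and is ignored unless CONFIG_COMPAT is defined).
+ */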
+
+typedef enum _DBG_EVENT_
+{
+ DBG_EVENT_STREAM_DATA = 1
+} DBG_EVENT;
+
+
+/*****************************************************************************
+ In/Out Structures
+*****************************************************************************/
+#if defined(__QNXNTO__)
+typedef struct _DBG_IN_CREATESTREAM_
+{
+ union
+ {
+ IMG_CHAR *pszName;
+ IMG_UINT64 ui64Name;
+ } u;
+ IMG_UINT32 ui32Pages;
+ IMG_UINT32 ui32CapMode;
+ IMG_UINT32 ui32OutMode;
+}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
+
+typedef struct _DBG_OUT_CREATESTREAM_
+{
+ IMG_HANDLE phInit;
+ IMG_HANDLE phMain;
+ IMG_HANDLE phDeinit;
+} DBG_OUT_CREATESTREAM, *PDBG_OUT_CREATESTREAM;
+#endif
+
+typedef struct _DBG_IN_FINDSTREAM_
+{
+ IMG_CHAR pszName[DEBUG_STREAM_NAME_MAX];
+ IMG_BOOL bResetStream;
+}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
+
+#define DEBUG_READ_BUFID_MAIN 0
+#define DEBUG_READ_BUFID_INIT 1
+#define DEBUG_READ_BUFID_DEINIT 2
+
+typedef struct _DBG_IN_READ_
+{
+ DBG_WIDEPTR pui8OutBuffer;
+ IMG_SID hStream;
+ IMG_UINT32 ui32BufID;
+ IMG_UINT32 ui32OutBufferSize;
+} DBG_IN_READ, *PDBG_IN_READ;
+
+typedef struct _DBG_OUT_READ_
+{
+ IMG_UINT32 ui32DataRead;
+ IMG_UINT32 ui32SplitMarker;
+} DBG_OUT_READ, *PDBG_OUT_READ;
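+
+/* Illustrative flow for a read request: user mode fills DBG_IN_READ by writing
+ * its native buffer pointer to pui8OutBuffer.pvPtr, selecting the buffer with
+ * one of the DEBUG_READ_BUFID_* values, identifying the stream via hStream and
+ * passing the buffer size in ui32OutBufferSize; the driver replies in
+ * DBG_OUT_READ with the number of bytes copied (ui32DataRead) and the current
+ * split marker (ui32SplitMarker).
+ */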
+
+typedef struct _DBG_IN_SETMARKER_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Marker;
+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
+
+/*
+ DBG STREAM abstract types
+*/
+
+typedef struct _DBG_STREAM_CONTROL_* PDBG_STREAM_CONTROL;
+typedef struct _DBG_STREAM_* PDBG_STREAM;
+
+/*
+ Lookup identifiers for the GetState method in the KM service table.
+ */
+#define DBG_GET_STATE_FLAG_IS_READONLY 0x03
+
+
+/*****************************************************************************
+ Kernel mode service table
+*****************************************************************************/
+typedef struct _DBGKM_SERVICE_TABLE_
+{
+ IMG_UINT32 ui32Size;
+ IMG_BOOL (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+ void (IMG_CALLCONV *pfnDestroyStream) (IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+ void (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+ void (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetCtrlState) (PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+ void (IMG_CALLCONV *pfnSetFrame) (IMG_UINT32 ui32Frame);
+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
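+
+/* Note: dbgdriv exposes a single instance of this table (g_sDBGKMServices,
+ * returned by DBGDrivGetServiceTable()); the server-side PDump layer obtains
+ * it, e.g. via the DEBUG_SERVICE_GETSERVICETABLE ioctl, so that it can call
+ * the entry points directly instead of going through the ioctl interface.
+ */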
+
+#if defined(_MSC_VER)
+#pragma warning(default:4200)
+#endif
+
+#endif
+
+/*****************************************************************************
+ End of file
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debugging and miscellaneous functions server implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel services functions for debugging and other
+ miscellaneous functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "debugmisc_server.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiFlags,
+ IMG_BOOL bSetBypassed)
+{
+ RGXFWIF_KCCB_CMD sSLCBPCtlCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sSLCBPCtlCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCBPCTL;
+ sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.bSetBypassed = bSetBypassed;
+ sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.uiFlags = uiFlags;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sSLCBPCtlCmd,
+ sizeof(sSLCBPCtlCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscSLCSetBypassStateKM: RGXScheduleCommand failed. Error:%u", eError));
+ }
+ else
+ {
+ /* Wait for the SLC bypass control command to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDebugMiscSLCSetEnableStateKM: Waiting for value aborted with error (%u)", eError));
+ }
+ }
+
+ return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32RGXFWLogType)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Guest drivers do not support tracebuf */
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(pui32RGXFWLogType);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+ if (!psDeviceNode || !pui32RGXFWLogType)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBuf)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+ return PVRSRV_OK;
+#endif
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+ const CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWLogType)
+{
+ RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* check log type is valid */
+ if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Guest drivers do not support tracebuf */
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(sLogTypeUpdateCmd);
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+ /* set the new log type */
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32RGXFWLogType;
+
+ /* Allocate firmware trace buffer resource(s) if not already done */
+ if (RGXTraceBufferIsInitRequired(psDevInfo))
+ {
+ RGXTraceBufferInitOnDemandResources(psDevInfo);
+ }
+
+ /* Ask the FW to update its cached version of logType value */
+ sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sLogTypeUpdateCmd,
+ sizeof(sLogTypeUpdateCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: RGXScheduleCommand failed. Error:%u", __FUNCTION__, eError));
+ }
+ else
+ {
+ /* Wait for the LogType value to be updated */
+ eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __FUNCTION__, eError));
+ }
+ }
+#endif
+
+ return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HCSDeadlineMS)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSidPriority)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSNewState)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32OSNewState)
+ {
+ return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_ONLINE);
+ }
+
+ return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ DLLIST_NODE *psNode, *psNext;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (dllist_is_empty(&psDevInfo->sFreeListHead))
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+ RGXDumpFreeListPageList(psFreeList);
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debugging and miscellaneous functions server interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel services functions for debugging and other
+ miscellaneous functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(DEBUGMISC_SERVER_H)
+#define DEBUGMISC_SERVER_H
+
+#include <img_defs.h>
+#include <pvrsrv_error.h>
+#include <device.h>
+#include <pmr.h>
+
+#include "connection_server.h"
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiFlags,
+ IMG_BOOL bSetBypassed);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscInitFWImageKM(
+ PMR *psFWImgDestPMR,
+ PMR *psFWImgSrcPMR,
+ IMG_UINT64 ui64FWImgLen,
+ PMR *psFWImgSigPMR,
+ IMG_UINT64 ui64FWSigLen);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32RGXFWLogType);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWLogType);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HCSDeadlineMS);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSidPriority);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSNewState);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Common Device header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device related function templates and defines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"
+#include "ra.h" /* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "srvkm.h"
+#include "physheap.h"
+#include <powervr/sync_external.h>
+#include "sysinfo.h"
+#include "dllist.h"
+#include "cache_km.h"
+
+#include "lock.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+struct pvr_buffer_sync_context;
+#endif
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_RECORD;
+#endif
+
+/*********************************************************************/ /*!
+ @Function AllocUFOCallback
+ @Description Device specific callback for allocation of a UFO block
+
+ @Input psDeviceNode Pointer to device node to allocate
+ the UFO for.
+ @Output ppsMemDesc Pointer to pointer for the memdesc of
+ the allocation
+ @Output pui32SyncAddr FW Base address of the UFO block
+ @Output puiSyncPrimBlockSize Size of the UFO block
+
+ @Return PVRSRV_OK if allocation was successful
+ */
+/*********************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *pui32SyncAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*********************************************************************/ /*!
+ @Function FreeUFOCallback
+ @Description Device specific callback for freeing of a UFO block
+
+ @Input psDeviceNode Pointer to device node that the UFO block was
+ allocated from.
+ @Input psMemDesc Pointer to the memdesc of the UFO
+ block to free.
+ */
+/*********************************************************************/
+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ DEVMEM_MEMDESC *psMemDesc);
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+ /* Pdump memory and register bank names */
+ IMG_CHAR *pszPDumpDevName;
+ IMG_CHAR *pszPDumpRegName;
+} PVRSRV_DEVICE_IDENTIFIER;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+ /* heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+ IMG_UINT32 ui32HeapCount;
+
+ /* Blueprints for creating new device memory contexts */
+ IMG_UINT32 uiNumHeapConfigs;
+ DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray;
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct _PG_HANDLE_
+{
+ union
+ {
+ void *pvHandle;
+ IMG_UINT64 ui64Handle;
+ }u;
+ /*Order of the corresponding allocation */
+ IMG_UINT32 ui32Order;
+} PG_HANDLE;
+
+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
+typedef struct __DUMMY_PAGE__
+{
+ /*Page handle for the dummy page allocated (UMA/LMA)*/
+ PG_HANDLE sDummyPageHandle;
+ POS_LOCK psDummyPgLock;
+ ATOMIC_T atRefCounter;
+ /*Dummy page size in terms of log2 */
+ IMG_UINT32 ui32Log2DummyPgSize;
+ IMG_UINT64 ui64DummyPgPhysAddr;
+#if defined(PDUMP)
+#define DUMMY_PAGE ("DUMMY_PAGE")
+ IMG_HANDLE hPdumpDummyPg;
+#endif
+} PVRSRV_DUMMY_PAGE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+ PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+ PVRSRV_DEVICE_STATE_INIT,
+ PVRSRV_DEVICE_STATE_ACTIVE,
+ PVRSRV_DEVICE_STATE_DEINIT,
+ PVRSRV_DEVICE_STATE_BAD,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+ PVRSRV_DEVICE_HEALTH_STATUS_OK = 0,
+ PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+ PVRSRV_DEVICE_HEALTH_STATUS_DEAD
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+ PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+ PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+ PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+ PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr);
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+ PVRSRV_DEVICE_IDENTIFIER sDevId;
+
+ PVRSRV_DEVICE_STATE eDevState;
+ ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */
+ ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */
+
+ IMG_HANDLE *hDebugTable;
+
+ /* device specific MMU attributes */
+ MMU_DEVICEATTRIBS *psMMUDevAttrs;
+ /* device specific MMU firmware attributes, used only in some devices */
+ MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs;
+
+ /* lock for power state transitions */
+ POS_LOCK hPowerLock;
+ /* current system device power state */
+ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState;
+ PVRSRV_POWER_DEV *psPowerDev;
+
+ /*
+ callbacks the device must support:
+ */
+
+ FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+ void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle);
+
+ PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+ void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PG_HANDLE *psMemHandle, void *pvPtr);
+
+ PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PG_HANDLE *pshMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
+
+ IMG_UINT32 uiMMUPxLog2AllocGran;
+
+ void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eLevel,
+ IMG_BOOL bUnmap);
+
+ PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT32 *pui32NextMMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+ IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+
+ void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_BOOL bIsTimerPoll);
+
+ PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ /* Method to drain device HWPerf packets from firmware buffer to host buffer */
+ PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+ PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+ PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(RGXFW_ALIGNCHECKS)
+ PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]);
+#endif
+ IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+ IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+ /* device post-finalise compatibility check */
+ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+ /* information about the device's address space and heaps */
+ DEVICE_MEMORY_INFO sDevMemoryInfo;
+
+ /* device's shared-virtual-memory heap size */
+ IMG_UINT64 ui64GeneralSVMHeapSize;
+
+ /* private device information */
+ void *pvDevice;
+
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+
+#define PVRSRV_MAX_RA_NAME_LENGTH (50)
+ RA_ARENA **apsLocalDevMemArenas;
+ IMG_CHAR **apszRANames;
+ IMG_UINT32 ui32NumOfLocalMemArenas;
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ IMG_CHAR szKernelFwRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+ RA_ARENA *psKernelFwMemArena[RGXFW_NUM_OS];
+ IMG_UINT32 uiKernelFwRAIdx;
+ RA_BASE_T ui64RABase[RGXFW_NUM_OS];
+#endif
+
+ IMG_UINT32 ui32RegisteredPhysHeaps;
+ PHYS_HEAP **papsRegisteredPhysHeaps;
+
+ /*
+ * Pointers to the device's physical memory heap(s)
+ * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap
+ * (but the device configuration could specify a UMA heap here, if desired)
+ * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be a UMA heap
+ * (but the configuration could specify an LMA heap here, if desired)
+ * The third entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_FW_LOCAL flag is set; this is used when SUPPORT_PVRSRV_GPUVIRT is enabled
+ * The device configuration will always specify two physical heap IDs - in the event of the device
+ * only using one physical heap, both of these IDs will be the same, and hence both pointers below
+ * will also be the same; when SUPPORT_PVRSRV_GPUVIRT is enabled the device configuration specifies
+ * three physical heap IDs, the last being for PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL allocations
+ */
+ PHYS_HEAP *apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ struct _PVRSRV_DEVICE_NODE_ *psNext;
+ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
+
+ /* Functions for notification about memory contexts */
+ PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData);
+ void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+ /* Functions for allocation/freeing of UFOs */
+ AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */
+ FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+ IMG_HANDLE hSyncServerNotify;
+ POS_LOCK hSyncServerListLock;
+ DLLIST_NODE sSyncServerSyncsList;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ IMG_HANDLE hSyncServerRecordNotify;
+ POS_LOCK hSyncServerRecordLock;
+ DLLIST_NODE sSyncServerRecordList;
+ struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+ IMG_UINT32 uiSyncServerRecordFreeIdx;
+#endif
+
+ PSYNC_PRIM_CONTEXT hSyncPrimContext;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim;
+ /* With this sync-prim we make sure the MMU cache is flushed
+ * before we free the page table memory */
+ PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim;
+ IMG_UINT32 ui32NextMMUInvalidateUpdate;
+
+ IMG_HANDLE hCmdCompNotify;
+ IMG_HANDLE hDbgReqNotify;
+ IMG_HANDLE hHtbDbgReqNotify;
+ IMG_HANDLE hAppHintDbgReqNotify;
+
+ PVRSRV_DUMMY_PAGE sDummyPage;
+
+ DLLIST_NODE sMemoryContextPageFaultNotifyListHead;
+
+#if defined(PDUMP)
+ /* device-level callback which is called when pdump.exe starts.
+ * Should be implemented in device-specific init code, e.g. rgxinit.c
+ */
+ PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+ /* device-level callback to return pdump ID associated to a memory context */
+ IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+} PVRSRV_DEVICE_NODE;
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bInitSuccessful);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+
+
+#endif /* __DEVICE_H__ */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File device_connection.h
+@Title
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DEVICE_CONNECTION_H__)
+#define __DEVICE_CONNECTION_H__
+
+#include "img_types.h"
+
+#if defined(__KERNEL__)
+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION;
+#else
+typedef IMG_HANDLE SHARED_DEV_CONNECTION;
+#endif
+
+/******************************************************************************
+ * Device capability flags and masks
+ *****************************************************************************/
+
+/* Flag to be passed over the bridge during connection stating whether CPU cache coherency is available */
+#define PVRSRV_CACHE_COHERENT_SHIFT (0)
+#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_MASK (3U << PVRSRV_CACHE_COHERENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (3)
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating SVM allocation availability */
+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (4)
+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
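+
+/* Illustrative sketch only (not part of this header): a client that has been
+ * handed the capability word during connection - assumed here to live in a
+ * hypothetical variable ui32CapFlags - could decode it as follows:
+ *
+ *     IMG_BOOL bCPUCoherent  = (ui32CapFlags & PVRSRV_CACHE_COHERENT_CPU_FLAG) != 0;
+ *     IMG_BOOL bDevCoherent  = (ui32CapFlags & PVRSRV_CACHE_COHERENT_DEVICE_FLAG) != 0;
+ *     IMG_BOOL bNonMappable  = (ui32CapFlags & PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG) != 0;
+ *     IMG_BOOL bSVMSupported = (ui32CapFlags & PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED) != 0;
+ */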
+
+#endif /* !defined(__DEVICE_CONNECTION_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Front End (nominally Client side part, but now invokable
+ from server too) of device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#include "services_km.h"
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+#include "pdump_km.h"
+#else
+#include "client_pdump_bridge.h"
+#endif
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVR_RI_DEBUG)
+#include "client_ri_bridge.h"
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "client_devicememhistory_bridge.h"
+#endif
+
+#include "rgx_heaps.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+#if defined(LINUX)
+#include "linux/kernel.h"
+#endif
+#else
+#include "rgxdefs.h"
+#endif
+
+#if defined(__KERNEL__) && defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR RIDumpAllKM(void);
+#endif
+
+/*****************************************************************************
+ * Sub allocation internals *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_BOOL bExportable,
+ const IMG_CHAR *pszAnnotation,
+ DEVMEM_IMPORT **ppsImport)
+{
+ DEVMEM_IMPORT *psImport;
+ DEVMEM_FLAGS_T uiPMRFlags;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ eError = _DevmemImportStructAlloc(hDevConnection,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failAlloc;
+ }
+
+ /* Check the size is a multiple of the quantum */
+ PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+ /* Pass only the PMR flags down */
+ uiPMRFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+ eError = BridgePhysmemNewRamBackedPMR(hDevConnection,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2Quantum,
+ uiPMRFlags,
+#if defined(PDUMP)
+ OSStringLength(pszAnnotation) + 1,
+ pszAnnotation,
+ &hPMR);
+#else
+ 1,
+ "",
+ &hPMR);
+
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+
+ if (eError != PVRSRV_OK)
+ {
+ /* Our check above should have ensured that the "not page
+ multiple" error never happens */
+ PVR_ASSERT(eError != PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE);
+
+ goto failPMR;
+ }
+
+ _DevmemImportStructInit(psImport,
+ uiSize,
+ uiAlign,
+ uiFlags,
+ hPMR,
+ bExportable ? DEVMEM_PROPERTIES_EXPORTABLE : 0);
+
+ *ppsImport = psImport;
+ return PVRSRV_OK;
+
+failPMR:
+ _DevmemImportDiscard(psImport);
+failAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*****************************************************************************
+ * Sub allocation internals *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pauiFreePageIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hSrvDevMemHeap;
+ POS_LOCK hLock;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_CPU_VIRTADDR sCpuVAddr;
+
+ if (NULL == psImport)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__));
+ goto e0;
+ }
+
+ hDevConnection = psImport->hDevConnection;
+ hPMR = psImport->hPMR;
+ hLock = psImport->hLock;
+ sDevVAddr = psImport->sDeviceImport.sDevVAddr;
+ sCpuVAddr = psImport->sCPUImport.pvCPUVAddr;
+
+ if (NULL == hDevConnection)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__));
+ goto e0;
+ }
+
+ if (NULL == hPMR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__));
+ goto e0;
+ }
+
+ if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__));
+ goto e0;
+ }
+
+ if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (0 == sCpuVAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__));
+ goto e0;
+ }
+
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Secure buffers currently do not support sparse changes",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap;
+
+ OSLockAcquire(hLock);
+
+ eError = BridgeChangeSparseMem(hDevConnection,
+ hSrvDevMemHeap,
+ hPMR,
+ ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32FreePageCount,
+ pauiFreePageIndices,
+ uiSparseFlags,
+ psImport->uiFlags,
+ sDevVAddr,
+ (IMG_UINT64)((uintptr_t)sCpuVAddr));
+
+ OSLockRelease(hLock);
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ BridgeRIUpdateMEMDESCBacking(psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ ((IMG_INT32) ui32AllocPageCount - (IMG_INT32) ui32FreePageCount)
+ * (1 << psImport->sDeviceImport.psHeap->uiLog2Quantum));
+ }
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ static IMG_BOOL bHaveNewAPI = IMG_TRUE;
+ PVRSRV_ERROR eError;
+
+ if(bHaveNewAPI)
+ {
+ eError = BridgeDevicememHistorySparseChange(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText,
+ DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap),
+ ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32FreePageCount,
+ pauiFreePageIndices,
+ psMemDesc->sTraceData.ui32AllocationIndex,
+ &psMemDesc->sTraceData.ui32AllocationIndex);
+
+ if(eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ bHaveNewAPI = IMG_FALSE;
+ }
+ }
+
+ /* no fallback required here.
+ * the old version of devicememhistory doesn't have entry
+ * points for SparseChange
+ */
+ }
+#endif
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ if ((PVRSRV_OK == eError) && (psMemDesc->sCPUMemDesc.ui32RefCount))
+ {
+ /*
+ * Release the CPU Virtual mapping here
+ * the caller is supposed to map entire range again
+ */
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+ }
+#endif
+
+e0:
+ return eError;
+}
+
+static void
+_FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+ _DevmemImportStructRelease(psImport);
+}
+
+static PVRSRV_ERROR
+_SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uiSize,
+ RA_FLAGS_T _flags,
+ const IMG_CHAR *pszAnnotation,
+ /* returned data */
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ /* When suballocations need a new lump of memory, the RA calls
+ back here. Later, in the kernel, we must construct a new PMR
+ and a pairing between the new lump of virtual memory and the
+ PMR (whether or not such PMR is backed by physical memory) */
+ DEVMEM_HEAP *psHeap;
+ DEVMEM_IMPORT *psImport;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32MappingTable = 0;
+ DEVMEM_FLAGS_T uiFlags = (DEVMEM_FLAGS_T) _flags;
+ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+ /* Per-arena private handle is, for us, the heap */
+ psHeap = hArena;
+
+ /* align to the l.s.b. of the size... e.g. 96kiB aligned to
+ 32kiB. NB: There is an argument to say that the RA should never
+ ask us for Non-power-of-2 size anyway, but I don't want to make
+ that restriction arbitrarily now */
+ uiAlign = uiSize & ~(uiSize-1);
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Technically this is only required for guest drivers due to
+ fw heaps being pre-allocated and pre-mapped resulting in
+ a 1:1 (i.e. virtual : physical) offset correlation but we
+ force this behaviour for all drivers to maintain consistency
+ (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
+ if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum))
+ {
+ uiAlign = (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum);
+ }
+#endif
+
+ /* The RA should not have invoked us with a size that is not a
+ multiple of the quantum anyway */
+ PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+ eError = _AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
+ psHeap->uiLog2Quantum,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_FALSE,
+ pszAnnotation,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failAlloc;
+ }
+
+#if defined (PDUMP)
+ /* Keep the annotation in the Devmem layer so we know where suballocations came from */
+ psImport->pszAnnotation = OSAllocMem(OSStringLength(pszAnnotation)+1);
+ if (psImport->pszAnnotation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failAllocMem;
+ }
+ OSStringNCopy(psImport->pszAnnotation, pszAnnotation, OSStringLength(pszAnnotation)+1);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR,
+ sizeof("PMR sub-allocated"),
+ "PMR sub-allocated",
+ psImport->uiSize);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#endif
+
+ /*
+ Suballocations always get mapped into the device as we need to
+ key the RA off something and, as we can't export suballocations,
+ there is no valid reason to request an allocation and not map it
+ */
+ eError = _DevmemImportStructDevMap(psHeap,
+ IMG_TRUE,
+ psImport,
+ ui64OptionalMapAddress);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ /* Mark this import struct as zeroed so we can save some PDump LDBs
+ * and do not have to CPU map + memset()*/
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+ }
+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+ *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ *puiActualSize = uiSize;
+ *phImport = psImport;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+failMap:
+#if defined(PDUMP)
+failAllocMem:
+ OSFreeMem(psImport->pszAnnotation);
+ psImport->pszAnnotation = NULL;
+#endif
+ _FreeDeviceMemory(psImport);
+failAlloc:
+
+ return eError;
+}
+
+static void
+_SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ DEVMEM_IMPORT *psImport = hImport;
+
+ PVR_ASSERT(psImport != NULL);
+ PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+ PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+ _DevmemImportStructDevUnmap(psImport);
+ _DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ * Devmem context internals *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_PopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ struct _DEVMEM_HEAP_ **ppsHeapArray;
+ IMG_UINT32 uiNumHeaps;
+ IMG_UINT32 uiHeapsToUnwindOnError;
+ IMG_UINT32 uiHeapIndex;
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2TilingStrideFactor;
+
+ eError = DevmemHeapCount(psCtx->hDevConnection,
+ uiHeapBlueprintID,
+ &uiNumHeaps);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (uiNumHeaps == 0)
+ {
+ ppsHeapArray = NULL;
+ }
+ else
+ {
+ ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+ if (ppsHeapArray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ uiHeapsToUnwindOnError = 0;
+
+ for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+ {
+ eError = DevmemHeapDetails(psCtx->hDevConnection,
+ uiHeapBlueprintID,
+ uiHeapIndex,
+ &aszHeapName[0],
+ sizeof(aszHeapName),
+ &sDevVAddrBase,
+ &uiHeapLength,
+ &uiLog2DataPageSize,
+ &uiLog2ImportAlignment,
+ &uiLog2TilingStrideFactor);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ eError = DevmemCreateHeap(psCtx,
+ sDevVAddrBase,
+ uiHeapLength,
+ uiLog2DataPageSize,
+ uiLog2ImportAlignment,
+ uiLog2TilingStrideFactor,
+ aszHeapName,
+ uiHeapBlueprintID,
+ &ppsHeapArray[uiHeapIndex]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ uiHeapsToUnwindOnError = uiHeapIndex + 1;
+ }
+
+ psCtx->uiAutoHeapCount = uiNumHeaps;
+ psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+ PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+ PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths
+ */
+ e1:
+ for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+ {
+ eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+ if (uiNumHeaps != 0)
+ {
+ OSFreeMem(ppsHeapArray);
+ }
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR
+_UnpopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+ PVRSRV_ERROR eReturn = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 uiHeapIndex;
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+
+ for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+ {
+ if (!psCtx->ppsAutoHeapArray[uiHeapIndex])
+ {
+ continue;
+ }
+
+ eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+ if (eError2 != PVRSRV_OK)
+ {
+ eReturn = eError2;
+ }
+ else
+ {
+ psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL;
+ }
+ }
+
+ if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray)
+ {
+ OSFreeMem(psCtx->ppsAutoHeapArray);
+ psCtx->ppsAutoHeapArray = NULL;
+ psCtx->uiAutoHeapCount = 0;
+ }
+
+ return eReturn;
+}
+
+
+/*****************************************************************************
+ * Devmem context functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_CONTEXT **ppsCtxPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_CONTEXT *psCtx;
+ /* handle to the server-side counterpart of the device memory
+ context (specifically, for handling mapping to device MMU) */
+ IMG_HANDLE hDevMemServerContext;
+ IMG_HANDLE hPrivData;
+ IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+
+ if (ppsCtxPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psCtx = OSAllocMem(sizeof *psCtx);
+ if (psCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ psCtx->uiNumHeaps = 0;
+
+ psCtx->hDevConnection = hDevConnection;
+
+ /* Create (server-side) Device Memory context */
+ eError = BridgeDevmemIntCtxCreate(psCtx->hDevConnection,
+ bHeapCfgMetaId,
+ &hDevMemServerContext,
+ &hPrivData,
+ &psCtx->ui32CPUCacheLineSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ psCtx->hDevMemServerContext = hDevMemServerContext;
+ psCtx->hPrivData = hPrivData;
+
+ /* automagic heap creation */
+ psCtx->uiAutoHeapCount = 0;
+
+ eError = _PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+
+ *ppsCtxPtr = psCtx;
+
+
+ PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e2:
+ PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+ PVR_ASSERT(psCtx->uiNumHeaps == 0);
+ BridgeDevmemIntCtxDestroy(psCtx->hDevConnection, hDevMemServerContext);
+
+ e1:
+ OSFreeMem(psCtx);
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+ IMG_HANDLE *hPrivData)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psCtx == NULL) || (hPrivData == NULL))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ *hPrivData = psCtx->hPrivData;
+ return PVRSRV_OK;
+
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+ PVRSRV_ERROR eError;
+
+ if (psCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ return PVRSRV_OK;
+
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct _DEVMEM_CONTEXT_ *psCtx,
+ const IMG_CHAR *pszHeapName,
+ struct _DEVMEM_HEAP_ **ppsHeapRet)
+{
+ IMG_UINT32 uiHeapIndex;
+
+ /* N.B. This func is only useful for finding "automagic" heaps by name */
+ for (uiHeapIndex = 0;
+ uiHeapIndex < psCtx->uiAutoHeapCount;
+ uiHeapIndex++)
+ {
+ if (!OSStringCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName))
+ {
+ *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+ return PVRSRV_OK;
+ }
+ }
+
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
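+
+/* Illustrative call sequence (a sketch, not part of the driver) showing how the
+ * context, heap and sub-allocation entry points in this file fit together.
+ * The blueprint ID, heap name ("GENERAL"), size, alignment and connection
+ * handle used below are placeholders and error checking is omitted.
+ *
+ *     DEVMEM_CONTEXT *psCtx;
+ *     DEVMEM_HEAP *psHeap;
+ *     DEVMEM_MEMDESC *psMemDesc;
+ *
+ *     DevmemCreateContext(hDevConnection, uiHeapBlueprintID, &psCtx);
+ *     DevmemFindHeapByName(psCtx, "GENERAL", &psHeap);
+ *     DevmemSubAllocate(RA_NO_IMPORT_MULTIPLIER, psHeap, uiSize, uiAlign,
+ *                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, "example alloc",
+ *                       &psMemDesc);
+ *     ...
+ *     DevmemFree(psMemDesc);
+ *     DevmemDestroyContext(psCtx); (this also destroys the automagic heaps)
+ */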
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+
+ if (psCtx == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = _UnpopulateContextFromBlueprint(psCtx);
+ if (bDoCheck && eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: _UnpopulateContextFromBlueprint failed (%d) leaving %d heaps",
+ __func__, eError, psCtx->uiNumHeaps));
+ goto e1;
+ }
+
+ eError = BridgeDevmemIntCtxDestroy(psCtx->hDevConnection,
+ psCtx->hDevMemServerContext);
+ if (bDoCheck && eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BridgeDevmemIntCtxDestroy failed (%d)",
+ __func__, eError));
+ goto e1;
+ }
+
+ /* should be no more heaps left */
+ if (bDoCheck && psCtx->uiNumHeaps)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Additional heaps remain in DEVMEM_CONTEXT",
+ __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT;
+ goto e1;
+ }
+
+ OSDeviceMemSet(psCtx, 0, sizeof(*psCtx));
+ OSFreeMem(psCtx);
+
+e1:
+ return eError;
+}
+
+/*****************************************************************************
+ * Devmem heap query functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 *puiNumHeapConfigsOut)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapConfigCount(hDevConnection,
+ puiNumHeapConfigsOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapCount(hDevConnection,
+ uiHeapConfigIndex,
+ puiNumHeapsOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_CHAR *pszConfigNameOut,
+ IMG_UINT32 uiConfigNameBufSz)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapConfigName(hDevConnection,
+ uiHeapConfigIndex,
+ uiConfigNameBufSz,
+ pszConfigNameOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapDetails(hDevConnection,
+ uiHeapConfigIndex,
+ uiHeapIndex,
+ uiHeapNameBufSz,
+ pszHeapNameOut,
+ psDevVAddrBaseOut,
+ puiHeapLengthOut,
+ puiLog2DataPageSizeOut,
+ puiLog2ImportAlignmentOut);
+
+ /* REL/1.8 maintain bridge compatibility
+ * 4:0 - uiLog2ImportAlignment (13--20)
+ * 18:16 - uiLog2TilingStrideFactor (3--4)
+ */
+ *puiLog2TilingStrideFactor = (*puiLog2ImportAlignmentOut >> 16);
+ *puiLog2ImportAlignmentOut &= 0xffff;
+
+ /* NB: *puiLog2TilingStrideFactor is either 3 or 4 (tiling mode 1 or 0).
+ * If reading from an older KM, *puiLog2TilingStrideFactor will not be set.
+ * If so force to 4 (tiling mode 0), which was the original assumption
+ * before puiLog2TilingStrideFactor was queried.
+ */
+ if (!*puiLog2TilingStrideFactor)
+ {
+ *puiLog2TilingStrideFactor = 4;
+ }
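+
+ /* Worked example of the decode above (illustrative only): a newer KM packing
+ * uiLog2ImportAlignment = 13 and uiLog2TilingStrideFactor = 3 returns
+ * (3 << 16) | 13 = 0x3000D, which the shift and mask split back into 3 and 13.
+ * An older KM returns just 0xD, so the stride factor decodes to 0 and is
+ * forced to 4 by the fallback above.
+ */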
+
+ VG_MARK_INITIALIZED(pszHeapNameOut,uiHeapNameBufSz);
+
+ return eError;
+}
+
+/*****************************************************************************
+ * Devmem heap functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+ IMG_HANDLE *phDevmemHeap)
+{
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *phDevmemHeap = psHeap->hDevMemServerHeap;
+ return PVRSRV_OK;
+}
+
+/* See devicemem.h for important notes regarding the arguments
+ to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+ IMG_DEV_VIRTADDR sBaseAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_UINT32 ui32Log2Quantum,
+ IMG_UINT32 ui32Log2ImportAlignment,
+ IMG_UINT32 ui32Log2TilingStrideFactor,
+ const IMG_CHAR *pszName,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_HEAP **ppsHeapPtr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ DEVMEM_HEAP *psHeap;
+ /* handle to the server-side counterpart of the device memory
+ heap (specifically, for handling mapping to device MMU) */
+ IMG_HANDLE hDevMemServerHeap;
+ IMG_BOOL bRANoSplit = IMG_FALSE;
+
+ IMG_CHAR aszBuf[100];
+ IMG_CHAR *pszStr;
+
+ if (ppsHeapPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psHeap = OSAllocMem(sizeof *psHeap);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /* Need to keep local copy of heap name, so caller may free
+ theirs */
+ pszStr = OSAllocMem(OSStringLength(pszName)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+ OSStringCopy(pszStr, pszName);
+ psHeap->pszName = pszStr;
+
+ psHeap->uiSize = uiLength;
+ psHeap->sBaseAddress = sBaseAddress;
+ OSAtomicWrite(&psHeap->hImportCount,0);
+
+ OSSNPrintf(aszBuf, sizeof(aszBuf),
+ "NDM heap '%s' (suballocs) ctx:%p",
+ pszName, psCtx);
+ pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+ OSStringCopy(pszStr, aszBuf);
+ psHeap->pszSubAllocRAName = pszStr;
+
+#if defined(PDUMP)
+ /* the META heap is shared globally so a single
+ * physical memory import may be used to satisfy
+ * allocations of different processes.
+ * This is problematic when PDumping because the
+ * physical memory import used to satisfy a new allocation
+ * may actually have been imported (and thus the PDump MALLOC
+ * generated) before the PDump client was started, leading to the
+ * MALLOC being missing.
+ * This is solved by disabling splitting of imports for the META physmem
+ * RA, meaning that every firmware allocation gets its own import, thus
+ * ensuring the MALLOC is present for every allocation made within the
+ * pdump capture range
+ */
+ if(uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+ {
+ bRANoSplit = IMG_TRUE;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID);
+#endif
+
+
+ psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+ /* Subsequent imports: */
+ ui32Log2Quantum,
+ RA_LOCKCLASS_2,
+ _SubAllocImportAlloc,
+ _SubAllocImportFree,
+ (RA_PERARENA_HANDLE) psHeap,
+ bRANoSplit);
+ if (psHeap->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e3;
+ }
+
+ psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+ psHeap->uiLog2TilingStrideFactor = ui32Log2TilingStrideFactor;
+ psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+ if (! OSStringCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT))
+ {
+ /* The SVM heap normally starts out as this type though
+ it may transition to DEVMEM_HEAP_TYPE_USER_MANAGED
+ on platforms with more processor virtual address
+ bits than device virtual address bits */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+ }
+ else
+ {
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_UNKNOWN;
+ }
+
+ OSSNPrintf(aszBuf, sizeof(aszBuf),
+ "NDM heap '%s' (QVM) ctx:%p",
+ pszName, psCtx);
+ pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e4;
+ }
+ OSStringCopy(pszStr, aszBuf);
+ psHeap->pszQuantizedVMRAName = pszStr;
+
+ psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+ /* Subsequent import: */
+ 0, RA_LOCKCLASS_1, NULL, NULL,
+ (RA_PERARENA_HANDLE) psHeap,
+ IMG_FALSE);
+ if (psHeap->psQuantizedVMRA == NULL)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e5;
+ }
+
+ if (!RA_Add(psHeap->psQuantizedVMRA,
+ (RA_BASE_T)sBaseAddress.uiAddr,
+ (RA_LENGTH_T)uiLength,
+ (RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+ NULL /* per ispan handle */))
+ {
+ RA_Delete(psHeap->psQuantizedVMRA);
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e5;
+ }
+
+ psHeap->psCtx = psCtx;
+
+
+ /* Create server-side counterpart of Device Memory heap */
+ eError = BridgeDevmemIntHeapCreate(psCtx->hDevConnection,
+ psCtx->hDevMemServerContext,
+ sBaseAddress,
+ uiLength,
+ ui32Log2Quantum,
+ &hDevMemServerHeap);
+ if (eError != PVRSRV_OK)
+ {
+ goto e6;
+ }
+ psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+ eError = OSLockCreate(&psHeap->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e7;
+ }
+
+ psHeap->psCtx->uiNumHeaps ++;
+ *ppsHeapPtr = psHeap;
+
+#if defined PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING
+ psHeap->psMemDescList = NULL;
+#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths
+ */
+ e7:
+ eError2 = BridgeDevmemIntHeapDestroy(psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap);
+ PVR_ASSERT (eError2 == PVRSRV_OK);
+ e6:
+ if (psHeap->psQuantizedVMRA)
+ RA_Delete(psHeap->psQuantizedVMRA);
+ e5:
+ if (psHeap->pszQuantizedVMRAName)
+ OSFreeMem(psHeap->pszQuantizedVMRAName);
+ e4:
+ RA_Delete(psHeap->psSubAllocRA);
+ e3:
+ OSFreeMem(psHeap->pszSubAllocRAName);
+ e2:
+ OSFreeMem(psHeap->pszName);
+ e1:
+ OSFreeMem(psHeap);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct _DEVMEM_HEAP_ *psHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr)
+{
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pDevVAddr = psHeap->sBaseAddress;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+
+ if ((1ULL << uiLog2Quantum) > uiAlign)
+ {
+ uiAlign = 1ULL << uiLog2Quantum;
+ }
+ uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+ *puiSize = uiSize;
+ *puiAlign = uiAlign;
+}
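+
+/* Illustrative usage of DevmemExportalignAdjustSizeAndAlign() - a sketch, not
+ * driver code: for a heap with uiLog2Quantum = 12 (4 KiB pages), a request of
+ * uiSize = 0x1800 and uiAlign = 0x100 is adjusted to uiSize = 0x2000 and
+ * uiAlign = 0x1000, i.e. both are rounded up to the heap quantum:
+ *
+ *     IMG_DEVMEM_SIZE_T uiSize = 0x1800;
+ *     IMG_DEVMEM_ALIGN_T uiAlign = 0x100;
+ *     DevmemExportalignAdjustSizeAndAlign(12, &uiSize, &uiAlign);
+ *     (now uiSize == 0x2000 and uiAlign == 0x1000)
+ */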
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+ PVRSRV_ERROR eError;
+ IMG_INT uiImportCount;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+#endif
+
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+ if (uiImportCount > 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName));
+#if defined(__KERNEL__)
+#if defined(PVR_RI_DEBUG)
+ PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):"));
+ RIDumpAllKM();
+#else
+ PVR_DPF((PVR_DBG_ERROR, "Compile with PVR_RI_DEBUG=1 to get a full "
+ "list of all driver allocations."));
+#endif
+#endif
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bDoCheck)
+#endif
+ {
+ return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+ }
+ }
+
+ eError = BridgeDevmemIntHeapDestroy(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bDoCheck)
+#endif
+ {
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BridgeDevmemIntHeapDestroy failed (%d)",
+ __func__, eError));
+ return eError;
+ }
+ }
+
+ PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+ psHeap->psCtx->uiNumHeaps--;
+
+ OSLockDestroy(psHeap->hLock);
+
+ if (psHeap->psQuantizedVMRA)
+ {
+ RA_Delete(psHeap->psQuantizedVMRA);
+ }
+ if (psHeap->pszQuantizedVMRAName)
+ {
+ OSFreeMem(psHeap->pszQuantizedVMRAName);
+ }
+
+ RA_Delete(psHeap->psSubAllocRA);
+ OSFreeMem(psHeap->pszSubAllocRAName);
+
+ OSFreeMem(psHeap->pszName);
+
+ OSDeviceMemSet(psHeap, 0, sizeof(*psHeap));
+ OSFreeMem(psHeap);
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ * Devmem allocation/free functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ RA_BASE_T uiAllocatedAddr;
+ RA_LENGTH_T uiAllocatedSize;
+ RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ IMG_DEVMEM_OFFSET_T uiOffset = 0;
+ DEVMEM_IMPORT *psImport;
+ IMG_UINT32 ui32CPUCacheLineSize;
+ void *pvAddr;
+
+ IMG_BOOL bImportClean;
+ IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags);
+ IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+ IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) ||
+ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags));
+ IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) ||
+ PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags));
+ PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE;
+ IMG_UINT32 ui32CacheLineSize;
+
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ /* Deferred Allocation not supported on SubAllocs */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psHeap == NULL || psHeap->psCtx == NULL || ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+#if defined(__KERNEL__)
+ {
+ /* The hDevConnection holds two different types of pointers depending on the
+ * address space in which it is used.
+ * In this instance the variable points to the device node in server */
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+ ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevNode->pfnGetDeviceFeatureValue(psDevNode, \
+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK));
+ }
+#else
+ ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE;
+#endif
+
+ /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU.
+ * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each.
+ * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM.
+ * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments.
+ */
+ ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize;
+ /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple
+ * Also checking if the allocation is going to be cached on the CPU
+ * Currently there is no check for the validity of the cache coherent option.
+ * In this case, the alignment could be applied but the mode could still fall back to uncached.
+ */
+ if (ui32CPUCacheLineSize > uiAlign && bCPUCached)
+ {
+ uiAlign = ui32CPUCacheLineSize;
+ }
+
+ /* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple
+ * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options.
+ * Currently there is no check for the validity of the cache coherent option.
+ * In this case, the alignment could be applied but the mode could still fall back to uncached.
+ */
+ if (ui32CacheLineSize > uiAlign && bGPUCached)
+ {
+ uiAlign = ui32CacheLineSize;
+ }
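+
+ /* Worked example of the above (illustrative only): with a requested uiAlign
+ * of 16, a CPU cache line of 64 bytes and a GPU (SLC) cache line of 128
+ * bytes, a fully cached allocation ends up with uiAlign = 128 - the largest
+ * of the three powers of two and therefore their lowest common multiple.
+ */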
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError =_DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ /* No request for exportable memory so use the RA */
+ eError = RA_Alloc(psHeap->psSubAllocRA,
+ uiSize,
+ uiPreAllocMultiplier,
+ uiFlags,
+ uiAlign,
+ pszText,
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ &hImport);
+ if (PVRSRV_OK != eError)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ psImport = hImport;
+
+ /* The RA may return an hImport that is shared with other suballocations
+ * when uiSize is NOT a page multiple of the passed heap.
+ *
+ * So we check whether uiSize is a page multiple (and no pre-allocation
+ * multiplier was requested) and only then mark the import as exportable,
+ * i.e. only when the import cannot be shared.
+ */
+ if (!(uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) &&
+ (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER) )
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE;
+ }
+ psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE;
+
+ uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+ PDumpCommentWithFlags(PDUMP_NONE,
+ "Suballocated %u Byte for \"%s\" from physical allocation \"%s\"",
+ (IMG_UINT32) uiSize, pszText, psImport->pszAnnotation);
+#else
+ {
+ IMG_CHAR pszComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ OSSNPrintf(pszComment,
+ PVRSRV_PDUMP_MAX_COMMENT_SIZE,
+ "Suballocated %u Byte for \"%s\" from physical allocation \"%s\"",
+ (IMG_UINT32) uiSize,
+ pszText,
+ psImport->pszAnnotation);
+
+ BridgePVRSRVPDumpComment(psHeap->psCtx->hDevConnection, pszComment, IMG_FALSE);
+ }
+#endif
+#endif
+
+ _DevmemMemDescInit(psMemDesc,
+ uiOffset,
+ psImport,
+ uiSize);
+
+ bImportClean = ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0);
+
+ /* Zero the memory */
+ if (bZero)
+ {
+ /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */
+ bImportClean = bImportClean && ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0);
+
+ if(!bImportClean)
+ {
+ eOp = PVRSRV_CACHE_OP_FLUSH;
+
+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMaintenance;
+ }
+
+ /* uiSize is a 64-bit quantity whereas the 3rd argument
+ * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+ * hence a compiler warning of implicit cast and loss of data.
+ * Added explicit cast and assert to remove warning.
+ */
+ PVR_ASSERT(uiSize < IMG_UINT32_MAX);
+
+ OSDeviceMemSet(pvAddr, 0x0, (size_t) uiSize);
+
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+
+#if defined(PDUMP)
+ DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+ }
+ }
+
+ /* Flush or invalidate */
+ if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag))
+ {
+ /* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec
+ to ensure this cache maintenance is actioned immediately */
+ eError = BridgeCacheOpExec (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->uiAllocSize,
+ eOp);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMaintenance;
+ }
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+ pszText,
+ psMemDesc->uiOffset,
+ uiAllocatedSize,
+ uiAllocatedSize,
+ IMG_FALSE,
+ IMG_FALSE,
+ &(psMemDesc->hRIHandle));
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ *ppsMemDescPtr = psMemDesc;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+failMaintenance:
+ _DevmemMemDescRelease(psMemDesc);
+ psMemDesc = NULL; /* Make sure we don't do a discard after the release */
+failDeviceMemAlloc:
+ if (psMemDesc)
+ {
+ _DevmemMemDescDiscard(psMemDesc);
+ }
+failMemDescAlloc:
+failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_UINT32 ui32MappingTable = 0;
+
+ DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+ &uiSize,
+ &uiAlign);
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError =_DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _AllocateDeviceMemory(hDevConnection,
+ uiLog2HeapPageSize,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_TRUE,
+ pszText,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR,
+ OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+ (IMG_CHAR *)pszText,
+ psImport->uiSize);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psImport->hDevConnection,
+ psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ uiSize,
+ uiSize,
+ IMG_FALSE,
+ IMG_TRUE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+failDeviceMemAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+ if (!(pui32MappingTable[i] < ui32NumVirtChunks))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "A mapping table index exceeds the size of the allocation:"
+ " pui32MappingTable[%u] %u, ui32NumVirtChunks %u ",
+ i,
+ pui32MappingTable[i],
+ ui32NumVirtChunks));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failMemDescAlloc;
+ }
+ }
+
+ DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+ &uiSize,
+ &uiAlign);
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError =_DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _AllocateDeviceMemory(hDevConnection,
+ uiLog2HeapPageSize,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_TRUE,
+ pszText,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR,
+ OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+ (IMG_CHAR *)pszText,
+ psImport->uiSize);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ uiSize,
+ ui32NumPhysChunks * uiChunkSize,
+ IMG_FALSE,
+ IMG_TRUE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ *ppsMemDescPtr = psMemDesc;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+failDeviceMemAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+ IMG_HANDLE hServerHandle,
+ IMG_HANDLE *hLocalImportHandle)
+{
+ return BridgePMRMakeLocalImportHandle(hBridge,
+ hServerHandle,
+ hLocalImportHandle);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+ IMG_HANDLE hLocalImportHandle)
+{
+ return BridgePMRUnmakeLocalImportHandle(hBridge, hLocalImportHandle);
+}
+
+/*****************************************************************************
+ * Devmem unsecure export functions *
+ *****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+ DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+ DEVMEM_EXPORTKEY *puiExportKeyPtr,
+ DEVMEM_SIZE_T *puiSize,
+ DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+ /* Gets an export handle and key for the PMR used for this mapping */
+ /* Can only be done if there are no suballocations for this mapping */
+
+ PVRSRV_ERROR eError;
+ DEVMEM_EXPORTHANDLE hPMRExportHandle;
+ DEVMEM_EXPORTKEY uiExportKey;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+ if (psImport == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if ((psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ goto failParams;
+ }
+
+ eError = BridgePMRExportPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ &hPMRExportHandle,
+ &uiSize,
+ &uiLog2Contig,
+ &uiExportKey);
+ if (eError != PVRSRV_OK)
+ {
+ goto failExport;
+ }
+
+ PVR_ASSERT(uiSize == psImport->uiSize);
+
+ *phPMRExportHandlePtr = hPMRExportHandle;
+ *puiExportKeyPtr = uiExportKey;
+ *puiSize = uiSize;
+ *puiLog2Contig = uiLog2Contig;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+failExport:
+failParams:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+
+}
+
+static void
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+ DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT (psImport != NULL);
+
+ eError = BridgePMRUnexportPMR(psImport->hDevConnection,
+ hPMRExportHandle);
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+ /* Caller to provide storage for export cookie struct */
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hPMRExportHandle = 0;
+ IMG_UINT64 uiPMRExportPassword = 0;
+ IMG_DEVMEM_SIZE_T uiSize = 0;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+ if (psMemDesc == NULL || psExportCookie == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = _Mapping_Export(psMemDesc->psImport,
+ &hPMRExportHandle,
+ &uiPMRExportPassword,
+ &uiSize,
+ &uiLog2Contig);
+ if (eError != PVRSRV_OK)
+ {
+ psExportCookie->uiSize = 0;
+ goto e0;
+ }
+
+ psExportCookie->hPMRExportHandle = hPMRExportHandle;
+ psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+ psExportCookie->uiSize = uiSize;
+ psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+ _Mapping_Unexport(psMemDesc->psImport,
+ psExportCookie->hPMRExportHandle);
+
+ psExportCookie->uiSize = 0;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_EXPORTCOOKIE *psCookie,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ if (ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ eError =_DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _DevmemImportStructAlloc(hDevConnection,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failImportAlloc;
+ }
+
+ /* Get a handle to the PMR (inc refcount) */
+ eError = BridgePMRImportPMR(hDevConnection,
+ psCookie->hPMRExportHandle,
+ psCookie->uiPMRExportPassword,
+ psCookie->uiSize, /* not trusted - just for sanity checks */
+ psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
+ &hPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto failImport;
+ }
+
+ _DevmemImportStructInit(psImport,
+ psCookie->uiSize,
+ 1ULL << psCookie->uiLog2ContiguityGuarantee,
+ uiFlags,
+ hPMR,
+ DEVMEM_PROPERTIES_IMPORTED |
+ DEVMEM_PROPERTIES_EXPORTABLE);
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ psImport->uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ psMemDesc->psImport->uiSize,
+ psMemDesc->psImport->uiSize,
+ IMG_TRUE,
+ IMG_FALSE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+failImport:
+ _DevmemImportDiscard(psImport);
+failImportAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*****************************************************************************
+ * Common MemDesc functions *
+ *****************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+ /* Stop if the allocation might have suballocations. */
+ if (!(psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: The passed allocation is not valid to unpin because "
+ "there might be suballocations on it. Make sure you allocate a page multiple "
+ "of the heap when using PVRSRVAllocDeviceMem()",
+ __FUNCTION__));
+
+ goto e_exit;
+ }
+
+ /* Stop if the Import is still mapped to CPU */
+ if (psImport->sCPUImport.ui32RefCount)
+ {
+ eError = PVRSRV_ERROR_STILL_MAPPED;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: There are still %u references on the CPU mapping. "
+ "Please remove all CPU mappings before unpinning.",
+ __FUNCTION__,
+ psImport->sCPUImport.ui32RefCount));
+
+ goto e_exit;
+ }
+
+ /* Only unpin if it is not already unpinned
+ * Return PVRSRV_OK */
+ if (psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ goto e_exit;
+ }
+
+ /* Unpin it and invalidate mapping */
+ if (psImport->sDeviceImport.bMapped == IMG_TRUE)
+ {
+ eError = BridgeDevmemIntUnpinInvalidate(psImport->hDevConnection,
+ psImport->sDeviceImport.hMapping,
+ psImport->hPMR);
+ }
+ else
+ {
+ /* Or just unpin it */
+ eError = BridgeDevmemIntUnpin(psImport->hDevConnection,
+ psImport->hPMR);
+ }
+
+ /* Update flags and RI when call was successful */
+ if (eError == PVRSRV_OK)
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ PVRSRV_ERROR eError2;
+
+ eError2 = BridgeRIUpdateMEMDESCPinning(psMemDesc->psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ IMG_FALSE);
+
+ if( eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCPinningKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+ }
+ }
+#endif
+ }
+ else
+ {
+ /* Or just show what went wrong */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
+ __func__,
+ eError));
+ }
+
+e_exit:
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+ /* Only pin if it is unpinned */
+ if ((psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
+ {
+ goto e_exit;
+ }
+
+ /* Pin it and make mapping valid */
+ if (psImport->sDeviceImport.bMapped)
+ {
+ eError = BridgeDevmemIntPinValidate(psImport->hDevConnection,
+ psImport->sDeviceImport.hMapping,
+ psImport->hPMR);
+ }
+ else
+ {
+ /* Or just pin it */
+ eError = BridgeDevmemIntPin(psImport->hDevConnection,
+ psImport->hPMR);
+ }
+
+ if ( (eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY) )
+ {
+ psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ PVRSRV_ERROR eError2;
+
+ eError2 = BridgeRIUpdateMEMDESCPinning(psMemDesc->psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ IMG_TRUE);
+
+ if( eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCPinningKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+ }
+ }
+#endif
+ }
+ else
+ {
+ /* Or just show what went wrong */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
+ __func__,
+ eError));
+ }
+
+e_exit:
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ *puiSize = psMemDesc->uiAllocSize;
+
+ return eError;
+}
+
+/*
+ This function is called for freeing any class of memory
+*/
+IMG_INTERNAL void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Please use methods dedicated to secure buffers.",
+ __func__));
+ return;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = BridgeRIDeleteMEMDESCEntry(psMemDesc->psImport->hDevConnection,
+ psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+ _DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+ DEVMEM_IMPORT *psImport;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bMap = IMG_TRUE;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failCheck;
+ }
+
+ /* Don't map memory for deferred allocations */
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+ bMap = IMG_FALSE;
+ }
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ psImport = psMemDesc->psImport;
+ _DevmemMemDescAcquire(psMemDesc);
+
+ eError = _DevmemImportStructDevMap(psHeap,
+ bMap,
+ psImport,
+ DEVICEMEM_UTILS_NO_ADDRESS);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ sDevVAddr.uiAddr += psMemDesc->uiOffset;
+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ static IMG_BOOL bHaveNewAPI = IMG_TRUE;
+ PVRSRV_ERROR eError;
+
+ if(bHaveNewAPI)
+ {
+ eError = BridgeDevicememHistoryMapNew(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText,
+ DevmemGetHeapLog2PageSize(psHeap),
+ psMemDesc->sTraceData.ui32AllocationIndex,
+ &psMemDesc->sTraceData.ui32AllocationIndex);
+
+ if(eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ bHaveNewAPI = IMG_FALSE;
+ }
+ }
+
+ if(!bHaveNewAPI)
+ {
+ BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText);
+ }
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ psImport->sDeviceImport.sDevVAddr);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+failMap:
+ _DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+failFlags:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr)
+{
+ DEVMEM_IMPORT *psImport;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bMap = IMG_TRUE;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failCheck;
+ }
+
+ /* Don't map memory for deferred allocations */
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+ bMap = IMG_FALSE;
+ }
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ psImport = psMemDesc->psImport;
+ _DevmemMemDescAcquire(psMemDesc);
+
+ eError = _DevmemImportStructDevMap(psHeap,
+ bMap,
+ psImport,
+ sDevVirtAddr.uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ sDevVAddr.uiAddr += psMemDesc->uiOffset;
+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ static IMG_BOOL bHaveNewAPI = IMG_TRUE;
+ PVRSRV_ERROR eError;
+
+ if(bHaveNewAPI)
+ {
+ eError = BridgeDevicememHistoryMapNew(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText,
+ DevmemGetHeapLog2PageSize(psHeap),
+ psMemDesc->sTraceData.ui32AllocationIndex,
+ &psMemDesc->sTraceData.ui32AllocationIndex);
+
+ if(eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ bHaveNewAPI = IMG_FALSE;
+ }
+ }
+
+ if(!bHaveNewAPI)
+ {
+ BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText);
+ }
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ psImport->sDeviceImport.sDevVAddr);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+failMap:
+ _DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+failFlags:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+ PVRSRV_ERROR eError;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failCheck;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+ goto failRelease;
+ }
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+ return PVRSRV_OK;
+
+failRelease:
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+failCheck:
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(psMemDesc != NULL);
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+ PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+ if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+ {
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ static IMG_BOOL bHaveNewAPI = IMG_TRUE;
+ PVRSRV_ERROR eError;
+
+ if(bHaveNewAPI)
+ {
+ eError = BridgeDevicememHistoryUnmapNew(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText,
+ DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap),
+ psMemDesc->sTraceData.ui32AllocationIndex,
+ &psMemDesc->sTraceData.ui32AllocationIndex);
+
+ if(eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ bHaveNewAPI = IMG_FALSE;
+ }
+ }
+
+ if(!bHaveNewAPI)
+ {
+ BridgeDevicememHistoryUnmap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->sTraceData.szText);
+ }
+ }
+#endif
+ _DevmemImportStructDevUnmap(psMemDesc->psImport);
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+ _DevmemMemDescRelease(psMemDesc);
+ }
+ else
+ {
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr)
+{
+ PVRSRV_ERROR eError;
+
+ if ( psMemDesc->psImport->uiProperties &
+ (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE) )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Allocation is currently unpinned or a secure buffer. "
+ "Not possible to map to CPU!",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sCPUMemDesc.ui32RefCount,
+ psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+ if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+ {
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ IMG_UINT8 *pui8CPUVAddr;
+
+ _DevmemMemDescAcquire(psMemDesc);
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+ pui8CPUVAddr += psMemDesc->uiOffset;
+ psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+ }
+ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+ return PVRSRV_OK;
+
+failMap:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ psMemDesc->sCPUMemDesc.ui32RefCount--;
+ _DevmemMemDescRelease(psMemDesc);
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+failFlags:
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(psMemDesc != NULL);
+
+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sCPUMemDesc.ui32RefCount,
+ psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+ PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+ if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+ {
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+ _DevmemImportStructCPUUnmap(psMemDesc->psImport);
+ _DevmemMemDescRelease(psMemDesc);
+ }
+ else
+ {
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+ }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport)
+{
+ if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ }
+
+ *phImport = psMemDesc->psImport->hPMR;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT64 *pui64UID)
+{
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRGetUID(psImport->hDevConnection,
+ psImport->hPMR,
+ pui64UID);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hReservation)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *hReservation = psImport->sDeviceImport.hReservation;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ *puiPMROffset = psMemDesc->uiOffset;
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *phPMR = psImport->hPMR;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_FLAGS_T *puiFlags)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *puiFlags = psImport->uiFlags;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc)
+{
+ return psMemDesc->psImport->hDevConnection;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation)
+{
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ if (ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _DevmemImportStructAlloc(hBridge,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failImportAlloc;
+ }
+
+ /* Get the PMR handle and its size from the server */
+ eError = BridgePMRLocalImportPMR(hBridge,
+ hExtHandle,
+ &hPMR,
+ &uiSize,
+ &uiAlign);
+ if (eError != PVRSRV_OK)
+ {
+ goto failImport;
+ }
+
+ _DevmemImportStructInit(psImport,
+ uiSize,
+ uiAlign,
+ uiFlags,
+ hPMR,
+ DEVMEM_PROPERTIES_IMPORTED |
+ DEVMEM_PROPERTIES_EXPORTABLE);
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+ if (puiSizePtr)
+ *puiSizePtr = uiSize;
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information.
+ * Set backed size to 0 since this allocation has been allocated
+ * by the same process and has been accounted for. */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ psMemDesc->psImport->uiSize,
+ 0,
+ IMG_TRUE,
+ IMG_FALSE,
+ &(psMemDesc->hRIHandle));
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ /* copy the allocation's descriptive name so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ OSStringNCopy(psMemDesc->sTraceData.szText, pszAnnotation, sizeof(psMemDesc->sTraceData.szText) - 1);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+ return PVRSRV_OK;
+
+failImport:
+ _DevmemImportDiscard(psImport);
+failImportAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ return BridgeDevmemIsVDevAddrValid(psContext->hDevConnection,
+ psContext->hDevMemServerContext,
+ sDevVAddr);
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap)
+{
+ return psHeap->uiLog2Quantum;
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+ IMG_UINT32 *puiLog2ImportAlignment,
+ IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+ *puiLog2ImportAlignment = psHeap->uiLog2ImportAlignment;
+ *puiLog2TilingStrideFactor = psHeap->uiLog2TilingStrideFactor;
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function RegisterDevmemPFNotify
+@Description Registers that the application wants to be signaled when a page
+ fault occurs.
+
+@Input psContext Memory context of the process that would like to
+ be notified.
+@Input ui32PID The PID of the calling process.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV
+ error code.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeDevmemIntRegisterPFNotifyKM(psContext->hDevConnection,
+ psContext->hDevMemServerContext,
+ ui32PID,
+ bRegister);
+ if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Bridge Call Failed: This could suggest a UM/KM miss-match (%d)",
+ __func__,
+ (IMG_INT)(eError)));
+ }
+
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management core internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to core device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/********************************************************************************
+ * *
+ * +------------+ +------------+ +--------------+ +--------------+ *
+ * | a sub- | | a sub- | | an | | allocation | *
+ * | allocation | | allocation | | allocation | | also mapped | *
+ * | | | | | in proc 1 | | into proc 2 | *
+ * +------------+ +------------+ +--------------+ +--------------+ *
+ * | | | | *
+ * +--------------+ +--------------+ +--------------+ *
+ * | page gran- | | page gran- | | page gran- | *
+ * | ular mapping | | ular mapping | | ular mapping | *
+ * +--------------+ +--------------+ +--------------+ *
+ * | | | *
+ * | | | *
+ * | | | *
+ * +--------------+ +--------------+ *
+ * | | | | *
+ * | A "P.M.R." | | A "P.M.R." | *
+ * | | | | *
+ * +--------------+ +--------------+ *
+ * *
+ ********************************************************************************/
+
+/*
+ All device memory allocations are ultimately a view upon (not
+ necessarily the whole of) a "PMR".
+
+ A PMR is a "Physical Memory Resource", which may be a
+ "pre-faulted" lump of physical memory, or it may be a
+ representation of some physical memory that will be instantiated
+ at some future time.
+
+ PMRs always represent a multiple of some power-of-2 "contiguity"
+ promised by the PMR, which will allow them to be mapped in whole
+ pages into the device MMU. As memory allocations may be smaller
+ than a page, these mappings may be suballocated and thus shared
+ between multiple allocations in one process. A PMR may also be
+ mapped simultaneously into multiple device memory contexts
+ (cross-process scenario), however, for security reasons, it is not
+ legal to share a PMR "both ways" at once, that is, mapped into
+ multiple processes and divided up amongst several suballocations.
+
+ This PMR terminology is introduced here for background
+ information, but is generally of little concern to the caller of
+ this API. This API handles suballocations and mappings, and the
+ caller thus deals primarily with MEMORY DESCRIPTORS representing
+ an allocation or suballocation, and with HEAPS representing ranges
+ of device virtual addresses within a CONTEXT.
+*/
+
+/*
+ |<---------------------------context------------------------------>|
+ |<-------heap------->| |<-------heap------->|<-------heap------->|
+ |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| |
+*/
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+#include "device_connection.h"
+
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+
+
+
+
+/*
+ In order to call the server side functions, we need a bridge handle.
+ We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/**************************************************************************/ /*!
+@Function DevmemUnpin
+@Description This is the counterpart to DevmemPin(). It is meant to be
+ called before repinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psMemDesc The MemDesc that is going to be unpinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
+
+/**************************************************************************/ /*!
+@Function DevmemPin
+@Description This is the counterpart to DevmemUnpin(). It is meant to be
+ called after unpinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psMemDesc The MemDesc that is going to be pinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc);
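+
+/*
+ * Illustrative sketch (not part of the driver API): one possible unpin/repin
+ * cycle for a pinnable allocation. The helper name is hypothetical; the
+ * return codes follow the descriptions above.
+ */
+static inline PVRSRV_ERROR ExampleUnpinRepinCycle(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVRSRV_ERROR eError;
+
+	/* Hand the physical backing back to the OS while it is not needed */
+	eError = DevmemUnpin(psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ...some time later, before the allocation is used again... */
+	eError = DevmemPin(psMemDesc);
+	if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+	{
+		/* New physical pages were allocated: the old contents are gone
+		 * and the caller must re-initialise the allocation here. */
+		eError = PVRSRV_OK;
+	}
+
+	return eError;
+}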
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+ IMG_HANDLE *phDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_SIZE_T* puiSize);
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide the device connection handle, which will be
+ * squirreled away internally and used for all future operations on
+ * items from this memory context. The connection is also used for
+ * MMU configuration and to determine the heap configuration for the
+ * auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the connection handle
+ * is not used and is thrown away by the "fake" direct bridge. (This
+ * may change. It is recommended that NULL be passed for the handle
+ * for now.)
+ *
+ * hDevConnection and uiHeapBlueprintID shall together dictate which
+ * heap-config to use.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you
+ * are promising that you will later call DevmemDestroyContext(),
+ * except for abnormal process termination in which case it is
+ * expected it will be destroyed as part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the DEVMEM_CONTEXT
+ * object thusly created.
+ */
+extern PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ *
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+ IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ *
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+extern PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
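+
+/*
+ * Illustrative sketch (not part of the driver API): the expected pairing of
+ * DevmemCreateContext() and DevmemDestroyContext() for a client process.
+ * The helper name is hypothetical; DEVMEM_HEAPCFG_FORCLIENTS is the heap
+ * config ID defined above.
+ */
+static inline PVRSRV_ERROR ExampleContextLifetime(SHARED_DEV_CONNECTION hDevConnection)
+{
+	DEVMEM_CONTEXT *psCtx = NULL;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemCreateContext(hDevConnection,
+	                             DEVMEM_HEAPCFG_FORCLIENTS,
+	                             &psCtx);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ...look up heaps and make allocations against psCtx here... */
+
+	return DevmemDestroyContext(psCtx);
+}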
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B. Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint. See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations"
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum)
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail. "Normal" allocations via DevmemAllocate()
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity. The
+ * supported quantum sizes in that case shall be dictated by the OS
+ * specific implementation of PhysmemNewOSRamBackedPMR() (see)
+ */
+extern PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+ /* base and length of heap */
+ IMG_DEV_VIRTADDR sBaseAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ /* log2 of allocation quantum, i.e. "page" size.
+ All allocations (that go to server side) are
+ multiples of this. We use a client-side RA to
+ make sub-allocations from this */
+ IMG_UINT32 ui32Log2Quantum,
+ /* The minimum import alignment for this heap */
+ IMG_UINT32 ui32Log2ImportAlignment,
+ /* (For tiling heaps) the factor to use to convert
+ alignment to optimum buffer stride */
+ IMG_UINT32 ui32Log2TilingStrideFactor,
+ /* Name of heap for debug */
+ /* N.B. Okay to exist on caller's stack - this
+ func takes a copy if it needs it. */
+ const IMG_CHAR *pszName,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_HEAP **ppsHeapPtr);
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+extern PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
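+
+/*
+ * Illustrative sketch (not part of the driver API): creating a heap
+ * explicitly rather than from a blueprint. The base address, length, name
+ * and helper name are hypothetical example values only; a log2 quantum of
+ * 12 selects 4kB data pages, and passing DEVMEM_HEAPCFG_FORCLIENTS as the
+ * blueprint ID is an assumption made for the example.
+ */
+static inline PVRSRV_ERROR ExampleCreateExplicitHeap(DEVMEM_CONTEXT *psCtx,
+                                                     DEVMEM_HEAP **ppsHeap)
+{
+	IMG_DEV_VIRTADDR sBase;
+
+	/* 1GB-aligned base and 1GB length, as recommended above */
+	sBase.uiAddr = 0x0040000000ULL;
+
+	return DevmemCreateHeap(psCtx,
+	                        sBase,
+	                        0x0040000000ULL, /* heap length */
+	                        12,              /* log2 data page size (4kB) */
+	                        12,              /* log2 import alignment */
+	                        0,               /* log2 tiling stride factor */
+	                        "ExampleHeap",
+	                        DEVMEM_HEAPCFG_FORCLIENTS,
+	                        ppsHeap);
+}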
+
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Compute the Size and Align passed to avoid suballocations (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN)
+ */
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * DevmemSubAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation". The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ *
+ * The size must be a positive integer multiple of the alignment.
+ * (i.e. the alignment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed though a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server. Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+#define DevmemAllocate(...) \
+ DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
+
+PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pauiFreePageIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemSubAllocate() N.B. The underlying
+ * mapping and server side allocation _may_ not be torn down, for
+ * example, if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+extern void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
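+
+/*
+ * Illustrative sketch (not part of the driver API): a small suballocation
+ * made through the DevmemAllocate() wrapper and released again with
+ * DevmemFree(). The helper name is hypothetical and the allocation flags
+ * are supplied by the caller so that no particular flag value is assumed.
+ */
+static inline PVRSRV_ERROR ExampleSubAllocation(DEVMEM_HEAP *psHeap,
+                                                DEVMEM_FLAGS_T uiFlags)
+{
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	PVRSRV_ERROR eError;
+
+	/* 4kB, 4kB-aligned: small enough to be served by the sub-alloc RA */
+	eError = DevmemAllocate(psHeap,
+	                        4096,
+	                        4096,
+	                        uiFlags,
+	                        "ExampleSubAllocation",
+	                        &psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ...map the allocation and use it here... */
+
+	DevmemFree(psMemDesc);
+	return PVRSRV_OK;
+}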
+
+/*
+ DevmemMapToDevice:
+
+ Map an allocation to the device it was allocated from.
+ This function _must_ be called before any call to
+ DevmemAcquireDevVirtAddr is made as it binds the allocation
+ to the heap.
+ DevmemReleaseDevVirtAddr is used to release the reference
+ to the device mapping this function created, but it doesn't
+ mean that the memory will actually be unmapped from the
+ device as other references to the mapping obtained via
+ DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+ DevmemMapToDeviceAddress:
+
+ Same as DevmemMapToDevice but the caller chooses the address
+ to map to.
+*/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr);
+
+/*
+ DevmemAcquireDevVirtAddr
+
+ Acquire the MemDesc's device virtual address.
+ This function _must_ be called after DevmemMapToDevice
+ and is expected to be used by functions which didn't allocate
+ the MemDesc but need to know its address.
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+extern void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
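+
+/*
+ * Illustrative sketch (not part of the driver API): the expected device
+ * mapping sequence. DevmemMapToDevice() creates the mapping and returns the
+ * device virtual address; any other user of that address then acquires and
+ * releases its own reference. The helper name is hypothetical.
+ */
+static inline PVRSRV_ERROR ExampleDeviceMapping(DEVMEM_MEMDESC *psMemDesc,
+                                                DEVMEM_HEAP *psHeap)
+{
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+
+	/* First (and only) call that binds the allocation to the heap */
+	eError = DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* A second user of the address takes its own reference... */
+	eError = DevmemAcquireDevVirtAddr(psMemDesc, &sDevVAddr);
+	if (eError == PVRSRV_OK)
+	{
+		/* ...and drops it again when finished with the address */
+		DevmemReleaseDevVirtAddr(psMemDesc);
+	}
+
+	/* Drop the reference taken by DevmemMapToDevice() */
+	DevmemReleaseDevVirtAddr(psMemDesc);
+
+	return eError;
+}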
+
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a license to use the cpu virtual address of this mapping.
+ * Note that the memory may not have been mapped into cpu virtual
+ * memory prior to this call. On first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on last put it
+ * _may_ become unmapped. Later calling "Acquire" again, _may_ cause
+ * the memory to be mapped at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr);
+/*
+ * DevmemReleaseCpuVirtAddr()
+ *
+ * give up the licence to use the cpu virtual address that was granted
+ * with the "Acquire" call.
+ */
+extern void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
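+
+/*
+ * Illustrative sketch (not part of the driver API): acquiring a CPU mapping
+ * just long enough to write to the allocation, then releasing it so the
+ * memory may be unmapped again. The helper name and the value written are
+ * hypothetical.
+ */
+static inline PVRSRV_ERROR ExampleCpuWrite(DEVMEM_MEMDESC *psMemDesc)
+{
+	void *pvCpuVAddr = NULL;
+	IMG_UINT32 *pui32Word;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* The pointer is only guaranteed valid between acquire and release */
+	pui32Word = pvCpuVAddr;
+	*pui32Word = 0xCAFEF00D;
+
+	DevmemReleaseCpuVirtAddr(psMemDesc);
+	return PVRSRV_OK;
+}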
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable()
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target processes. N.B. This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note, the caller must later call Unexport before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_EXPORTCOOKIE *psCookie,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
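+
+/*
+ * Illustrative sketch (not part of the driver API): the exporting process
+ * creates a cookie from an exportable allocation and passes it, via an IPC
+ * mechanism of its own choosing, to the importing process, which maps it
+ * with DevmemImport(). Both halves are shown in one hypothetical helper
+ * purely for brevity.
+ */
+static inline PVRSRV_ERROR ExampleExportImport(DEVMEM_MEMDESC *psExportableMemDesc,
+                                               SHARED_DEV_CONNECTION hImporterConnection,
+                                               DEVMEM_FLAGS_T uiFlags,
+                                               DEVMEM_MEMDESC **ppsImportedMemDesc)
+{
+	DEVMEM_EXPORTCOOKIE sCookie;
+	PVRSRV_ERROR eError;
+
+	/* Exporting side: psExportableMemDesc must have been created with
+	 * DevmemAllocateExportable() */
+	eError = DevmemExport(psExportableMemDesc, &sCookie);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ...cookie transported to the importing process here... */
+
+	/* Importing side */
+	eError = DevmemImport(hImporterConnection,
+	                      &sCookie,
+	                      uiFlags,
+	                      ppsImportedMemDesc);
+
+	/* Exporting side: unexport before freeing the original allocation */
+	DevmemUnexport(psExportableMemDesc, &sCookie);
+
+	return eError;
+}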
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*
+ * DevmemMakeLocalImportHandle()
+ *
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_HANDLE hServerExport,
+ IMG_HANDLE *hClientExport);
+
+/*
+ * DevmemUnmakeLocalImportHandle()
+ *
+ * Free any resource associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_HANDLE hClientExport);
+
+/*
+ *
+ * The following set of functions is specific to the heap "blueprint"
+ * stuff, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* DevmemHeapConfigCount: returns the number of heap configs that
+ this device has. Note that there is no acquire/release semantics
+ required, as this data is guaranteed to be constant for the
+ lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* DevmemHeapCount: returns the number of heaps that a given heap
+ config on this device has. Note that there is no acquire/release
+ semantics required, as this data is guaranteed to be constant for
+ the lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut);
+/* DevmemHeapConfigName: return the name of the given heap config.
+ The caller is to provide the storage for the returned string and
+ indicate the number of bytes (including null terminator) for such
+ string in the BufSz arg. Note that there is no acquire/release
+ semantics required, as this data is guaranteed to be constant for
+ the lifetime of the device node.
+ */
+extern PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_CHAR *pszConfigNameOut,
+ IMG_UINT32 uiConfigNameBufSz);
+
+/* DevmemHeapDetails: fetches all the metadata that is recorded in
+ this heap "blueprint". Namely: heap name (caller to provide
+ storage, and indicate buffer size (including null terminator) in
+ BufSz arg), device virtual address and length, log2 of data page
+ size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
+ Note that there is no acquire/release semantics required, as this
+ data is guaranteed to be constant for the lifetime of the device
+ node. */
+extern PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSize,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/*
+ * DevmemFindHeapByName()
+ *
+ * returns the heap handle for the named _automagic_ heap in this
+ * context. "automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+extern PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+ const IMG_CHAR *pszHeapName,
+ DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
+extern PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport);
+
+extern PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hPMR,
+ IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_FLAGS_T *puiFlags);
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+/* DevmemGetHeapLog2PageSize()
+ *
+ * Get the page size used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
+
+/* DevmemGetHeapTilingProperties()
+ *
+ * Get the import alignment and tiling stride factor used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+ IMG_UINT32 *puiLog2ImportAlignment,
+ IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function RegisterDevmemPFNotify
+@Description Registers that the application wants to be signaled when a page
+ fault occurs.
+
+@Input psContext Memory context of the process that would like to
+ be notified.
+@Input ui32PID The PID of the calling process.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV
+ error code.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File devicemem_heapcfg.c
+@Title Temporary Device Memory 2 stuff
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "connection_server.h"
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *puiNumHeapConfigsOut
+)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut
+)
+{
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapConfigNameBufSz,
+ IMG_CHAR *pszHeapConfigNameOut
+)
+{
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut
+)
+{
+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+ }
+
+ psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+ OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+ *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+ *puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+ *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+ *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+
+ /* REL/1.8 maintain bridge compatibility
+ * 4:0 - uiLog2ImportAlignment (13--20)
+ * 18:16 - uiLog2TilingStrideFactor (3--4)
+ */
+ *puiLog2ImportAlignmentOut |= (psHeapBlueprint->uiLog2TilingStrideFactor << 16);
+
+ return PVRSRV_OK;
+}
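+
+/*
+ * Illustrative sketch (not code used by the driver): how a recipient of the
+ * packed value written above could split *puiLog2ImportAlignmentOut back
+ * into its two fields. The helper name is hypothetical; the bit positions
+ * follow the REL/1.8 compatibility comment above.
+ */
+static inline void ExampleUnpackImportAlignment(IMG_UINT32 ui32Packed,
+                                                IMG_UINT32 *pui32Log2ImportAlignment,
+                                                IMG_UINT32 *pui32Log2TilingStrideFactor)
+{
+	/* bits 4:0 hold the log2 import alignment (13..20) */
+	*pui32Log2ImportAlignment = ui32Packed & 0x1F;
+
+	/* bits 18:16 hold the log2 tiling stride factor (3..4) */
+	*pui32Log2TilingStrideFactor = (ui32Packed >> 16) & 0x7;
+}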
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Temporary Device Memory 2 stuff
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEMHEAPCFG_H__
+#define __DEVICEMEMHEAPCFG_H__
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+/* FIXME: Find a better way of defining _PVRSRV_DEVICE_NODE_ */
+struct _PVRSRV_DEVICE_NODE_;
+/* FIXME: Find a better way of defining _CONNECTION_DATA_ */
+struct _CONNECTION_DATA_;
+
+
+/*
+ A "heap config" is a blueprint to be used for initial setting up of
+ heaps when a device memory context is created.
+
+ We define a data structure to define this, but it's really down to
+ the caller to populate it. This is all expected to be in-kernel.
+ We provide an API that client code can use to enquire about the
+ blueprint, such that it may do the heap setup during the context
+ creation call on behalf of the user */
+
+/* blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+ /* Name of this heap - for debug purposes, and perhaps for lookup
+ by name? */
+ const IMG_CHAR *pszName;
+
+ /* Virtual address of the beginning of the heap. This _must_ be a
+ multiple of the data page size for the heap. It is
+ _recommended_ that it be coarser than that - especially, it
+ should begin on a boundary appropriate to the MMU for the
+ device. For Rogue, this is a Page Directory boundary, or 1GB
+ (virtual address a multiple of 0x0040000000). */
+ IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+ /* Length of the heap. Given that the END address of the heap has
+ a similar restriction to that of the _beginning_ of the heap.
+ That is the heap length _must_ be a whole number of data pages.
+ Again, the recommendation is that it ends on a 1GB boundary.
+ Again, this is not essential, but we do know that (at the time
+ of writing) the current implementation of mmu_common.c is such
+ that no two heaps may share a page directory, thus the
+ remaining virtual space would be wasted if the length were not
+ a multiple of 1GB */
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+
+ /* Data page size. This is the page size that is going to get
+ programmed into the MMU, so it needs to be a valid one for the
+ device. Importantly, the start address and length _must_ be
+ multiples of this page size. Note that the page size is
+ specified as the log 2 relative to 1 byte (e.g. 12 indicates
+ 4kB) */
+ IMG_UINT32 uiLog2DataPageSize;
+
+ /* Import alignment. Force imports to this heap to be
+ aligned to at least this value */
+ IMG_UINT32 uiLog2ImportAlignment;
+
+ /* Tiled heaps have an optimum byte-stride, this can be derived from
+ the heap alignment and tiling mode. This is abstracted here such that
+ Log2ByteStride = Log2Alignment - Log2TilingStrideFactor */
+ IMG_UINT32 uiLog2TilingStrideFactor;
+} DEVMEM_HEAP_BLUEPRINT;
+
+/* entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+ /* Name of this heap config - for debug and maybe lookup */
+ const IMG_CHAR *pszName;
+
+ /* Number of heaps in this config */
+ IMG_UINT32 uiNumHeaps;
+
+ /* Array of individual heap blueprints as defined above */
+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
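+
+/*
+ * Illustrative sketch (not code used by the driver): how a device might
+ * describe one general-purpose heap through these blueprint structures.
+ * All names, addresses and sizes below are hypothetical example values;
+ * real values are device-specific and are filled in by the device's own
+ * initialisation code.
+ */
+static inline void ExampleDescribeHeapConfig(DEVMEM_HEAP_CONFIG *psConfigOut,
+                                             DEVMEM_HEAP_BLUEPRINT *psBlueprintOut)
+{
+	/* One general heap: 1GB long, based at a 1GB boundary, 4kB data pages */
+	psBlueprintOut->pszName                  = "ExampleGeneralHeap";
+	psBlueprintOut->sHeapBaseAddr.uiAddr     = 0x0040000000ULL;
+	psBlueprintOut->uiHeapLength             = 0x0040000000ULL;
+	psBlueprintOut->uiLog2DataPageSize       = 12;
+	psBlueprintOut->uiLog2ImportAlignment    = 12;
+	psBlueprintOut->uiLog2TilingStrideFactor = 0;
+
+	/* A heap config naming that single heap */
+	psConfigOut->pszName              = "ExampleHeapConfig";
+	psConfigOut->uiNumHeaps           = 1;
+	psConfigOut->psHeapBlueprintArray = psBlueprintOut;
+}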
+
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapCount(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapConfigNameBufSz,
+ IMG_CHAR *pszHeapConfigNameOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapDetails(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut
+);
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Devicemem history functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Devicemem history functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+#include "pdump_km.h"
+
+#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+/* data type to hold an allocation index.
+ * we make it 16 bits wide if possible
+ */
+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF
+typedef uint16_t ALLOC_INDEX_T;
+#else
+typedef uint32_t ALLOC_INDEX_T;
+#endif
+
+/* a record describing a single allocation known to DeviceMemHistory.
+ * this is an element in a doubly linked list of allocations
+ */
+typedef struct _RECORD_ALLOCATION_
+{
+ /* time when this RECORD_ALLOCATION was created/initialised */
+ IMG_UINT64 ui64CreationTime;
+ /* serial number of the PMR relating to this allocation */
+ IMG_UINT64 ui64Serial;
+ /* base DevVAddr of this allocation */
+ IMG_DEV_VIRTADDR sDevVAddr;
+ /* size in bytes of this allocation */
+ IMG_DEVMEM_SIZE_T uiSize;
+ /* Log2 page size of this allocation's GPU pages */
+ IMG_UINT32 ui32Log2PageSize;
+ /* Process ID (PID) this allocation belongs to */
+ IMG_PID uiPID;
+ /* index of previous allocation in the list */
+ ALLOC_INDEX_T ui32Prev;
+ /* index of next allocation in the list */
+ ALLOC_INDEX_T ui32Next;
+ /* annotation/name of this allocation */
+ IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+} RECORD_ALLOCATION;
+
+/* each command in the circular buffer is prefixed with an 8-bit value
+ * denoting the command type
+ */
+typedef enum _COMMAND_TYPE_
+{
+ COMMAND_TYPE_NONE,
+ COMMAND_TYPE_TIMESTAMP,
+ COMMAND_TYPE_MAP_ALL,
+ COMMAND_TYPE_UNMAP_ALL,
+ COMMAND_TYPE_MAP_RANGE,
+ COMMAND_TYPE_UNMAP_RANGE,
+ /* sentinel value */
+ COMMAND_TYPE_COUNT,
+} COMMAND_TYPE;
+
+/* Timestamp command:
+ * This command is inserted into the circular buffer to provide an updated
+ * timestamp.
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
+ * for the whole command to fit into 8 bytes.
+ */
+typedef struct _COMMAND_TIMESTAMP_
+{
+ IMG_UINT8 aui8TimeNs[7];
+} COMMAND_TIMESTAMP;
+
+/* MAP_ALL command:
+ * This command denotes the allocation at the given index was wholly mapped
+ * into the GPU MMU
+ */
+typedef struct _COMMAND_MAP_ALL_
+{
+ ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_ALL;
+
+/* UNMAP_ALL command:
+ * This command denotes the allocation at the given index was wholly unmapped
+ * from the GPU MMU
+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
+ */
+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
+
+/* packing attributes for the MAP_RANGE command */
+#define MAP_RANGE_MAX_START ((1 << 18) - 1)
+#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
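+/* the start page index is limited to 18 bits and the page count to 12 bits,
+ * so the packed value (see MapRangePack) fits within the 5-byte payload of a
+ * MAP_RANGE/UNMAP_RANGE command
+ */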
+
+/* MAP_RANGE command:
+ * Denotes a range of pages within the given allocation being mapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ */
+
+typedef struct _COMMAND_MAP_RANGE_
+{
+ IMG_UINT8 aui8Data[5];
+ ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_RANGE;
+
+/* UNMAP_RANGE command:
+ * Denotes a range of pages within the given allocation being unmapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */
+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
+
+/* wrapper structure for a command */
+typedef struct _COMMAND_WRAPPER_
+{
+ IMG_UINT8 ui8Type;
+ union {
+ COMMAND_TIMESTAMP sTimeStamp;
+ COMMAND_MAP_ALL sMapAll;
+ COMMAND_UNMAP_ALL sUnmapAll;
+ COMMAND_MAP_RANGE sMapRange;
+ COMMAND_UNMAP_RANGE sUnmapRange;
+ } u;
+} COMMAND_WRAPPER;
+
+/* target size, in KB, of the circular buffer of commands */
+#define CIRCULAR_BUFFER_SIZE_KB 2048
+/* turn the circular buffer target size into a number of commands */
+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
+
+/* index value denoting the end of a list */
+#define END_OF_LIST 0xFFFFFFFF
+#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[(idx)]))
+#define CHECK_ALLOC_INDEX(idx) ((idx) < ALLOCATION_LIST_NUM_ENTRIES)
+
+/* wrapper structure for the allocation records and the commands circular buffer */
+typedef struct _RECORDS_
+{
+ RECORD_ALLOCATION *pasAllocations;
+ IMG_UINT32 ui32AllocationsListHead;
+
+ IMG_UINT32 ui32Head;
+ IMG_UINT32 ui32Tail;
+ COMMAND_WRAPPER *pasCircularBuffer;
+} RECORDS;
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+ /* debugfs entry */
+ void *pvStatsEntry;
+
+ RECORDS sRecords;
+ POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData = { 0 };
+
+static void DevicememHistoryLock(void)
+{
+ OSLockAcquire(gsDevicememHistoryData.hLock);
+}
+
+static void DevicememHistoryUnlock(void)
+{
+ OSLockRelease(gsDevicememHistoryData.hLock);
+}
+
+/* given a time stamp, calculate the age in nanoseconds */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
+ IMG_UINT64 ui64Then,
+ IMG_UINT64 ui64Max)
+{
+ if(ui64Now >= ui64Then)
+ {
+ /* no clock wrap */
+ return ui64Now - ui64Then;
+ }
+ else
+ {
+ /* clock has wrapped */
+ return (ui64Max - ui64Then) + ui64Now + 1;
+ }
+}
+
+/* AcquireCBSlot:
+ * Acquire the next slot in the circular buffer and
+ * move the circular buffer head along by one
+ * Returns a pointer to the acquired slot.
+ */
+static COMMAND_WRAPPER *AcquireCBSlot(void)
+{
+ COMMAND_WRAPPER *psSlot;
+
+ psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+
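+ /* advance the head; once the buffer wraps, the oldest commands are
+ * simply overwritten
+ */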
+ gsDevicememHistoryData.sRecords.ui32Head =
+ (gsDevicememHistoryData.sRecords.ui32Head + 1)
+ % CIRCULAR_BUFFER_NUM_COMMANDS;
+
+ return psSlot;
+}
+
+/* TimeStampPack:
+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
+ * integer in the COMMAND_TIMESTAMP command.
+ */
+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
+{
+ IMG_UINT32 i;
+
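+ /* store the least significant byte first; the top 8 bits of the 64-bit
+ * value are discarded (see TIME_STAMP_MASK below)
+ */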
+ for(i = 0; i < IMG_ARR_NUM_ELEMS(psTimeStamp->aui8TimeNs); i++)
+ {
+ psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
+ ui64Now >>= 8;
+ }
+}
+
+/* packing a 64-bit nanosecond into a 7-byte integer loses the
+ * top 8 bits of data. This must be taken into account when
+ * comparing a full timestamp against an unpacked timestamp
+ */
+#define TIME_STAMP_MASK ((1LLU << 56) - 1)
+#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK)
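+/* Note: 56 bits of nanoseconds spans roughly 2.3 years before wrapping;
+ * _CalculateAge() accounts for the wrap via its ui64Max argument.
+ */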
+
+/* TimeStampUnpack:
+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
+ */
+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
+{
+ IMG_UINT64 ui64TimeNs = 0;
+ IMG_UINT32 i;
+
+ for(i = IMG_ARR_NUM_ELEMS(psTimeStamp->aui8TimeNs); i > 0; i--)
+ {
+ ui64TimeNs <<= 8;
+ ui64TimeNs |= psTimeStamp->aui8TimeNs[i - 1];
+ }
+
+ return ui64TimeNs;
+}
+
+#if defined(PDUMP)
+
+static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex,
+ RECORD_ALLOCATION *psAlloc)
+{
+ PDUMPCOMMENT("[SrvPFD] Allocation: %u"
+ " Addr: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+ " Page size: %u"
+ " PID: %u"
+ " Process: %s"
+ " Name: %s",
+ ui32AllocationIndex,
+ psAlloc->sDevVAddr.uiAddr,
+ psAlloc->uiSize,
+ 1U << psAlloc->ui32Log2PageSize,
+ psAlloc->uiPID,
+ OSGetCurrentClientProcessNameKM(),
+ psAlloc->szName);
+}
+
+static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType,
+ IMG_UINT32 ui32AllocationIndex)
+{
+ const IMG_CHAR *pszOpName;
+
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_ALL:
+ pszOpName = "MAP_ALL";
+ break;
+ case COMMAND_TYPE_UNMAP_ALL:
+ pszOpName = "UNMAP_ALL";
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
+ eType));
+ return;
+
+ }
+
+ PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u",
+ pszOpName,
+ ui32AllocationIndex);
+}
+
+static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ const IMG_CHAR *pszOpName;
+
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_RANGE:
+ pszOpName = "MAP_RANGE";
+ break;
+ case COMMAND_TYPE_UNMAP_RANGE:
+ pszOpName = "UNMAP_RANGE";
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
+ eType));
+ return;
+ }
+
+ PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
+ pszOpName,
+ ui32AllocationIndex,
+ ui32StartPage,
+ ui32Count);
+}
+
+#endif
+
+/* InsertTimeStampCommand:
+ * Insert a timestamp command into the circular buffer.
+ */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+
+ TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
+}
+
+/* InsertMapAllCommand:
+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
+ psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* InsertUnmapAllCommand:
+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
+ psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* MapRangePack:
+ * Pack the given StartPage and Count values into the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ IMG_UINT64 ui64Data;
+ IMG_UINT32 i;
+
+ /* we must encode the data into 40 bits:
+ * 18 bits for the start page index
+ * 12 bits for the range
+ */
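+ /* e.g. ui32StartPage = 3, ui32Count = 5 packs to 0x3005, stored
+ * least significant byte first as aui8Data = { 0x05, 0x30, 0x00, 0x00, 0x00 }
+ */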
+
+ PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
+ PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
+
+ ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
+
+ for(i = 0; i < IMG_ARR_NUM_ELEMS(psMapRange->aui8Data); i++)
+ {
+ psMapRange->aui8Data[i] = ui64Data & 0xFF;
+ ui64Data >>= 8;
+ }
+}
+
+/* MapRangeUnpack:
+ * Unpack the StartPage and Count values from the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
+ IMG_UINT32 *pui32StartPage,
+ IMG_UINT32 *pui32Count)
+{
+ IMG_UINT64 ui64Data = 0;
+ IMG_UINT32 i;
+
+ for(i = IMG_ARR_NUM_ELEMS(psMapRange->aui8Data); i > 0; i--)
+ {
+ ui64Data <<= 8;
+ ui64Data |= psMapRange->aui8Data[i - 1];
+ }
+
+ *pui32StartPage = (ui64Data >> 12);
+ *pui32Count = ui64Data & ((1 << 12) - 1);
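+ /* ui32Head is the next slot to be written, so iteration begins one
+ * position before the head, at the most recently written command
+ */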
+}
+
+/* InsertMapRangeCommand:
+ * Insert a MAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE,
+ ui32AllocIndex,
+ ui32StartPage,
+ ui32Count);
+#endif
+}
+
+/* InsertUnmapRangeCommand:
+ * Insert a UNMAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE,
+ ui32AllocIndex,
+ ui32StartPage,
+ ui32Count);
+#endif
+}
+
+/* InsertAllocationToList:
+ * Helper function for the allocation list.
+ * Inserts the given allocation at the head of the list, whose current head is
+ * pointed to by pui32ListHead
+ */
+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ if(*pui32ListHead == END_OF_LIST)
+ {
+ /* list is currently empty, so just replace it */
+ *pui32ListHead = ui32Alloc;
+ psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
+ }
+ else
+ {
+ RECORD_ALLOCATION *psHeadAlloc;
+ RECORD_ALLOCATION *psTailAlloc;
+
+ psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
+ psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+
+ /* make the new alloc point forwards to the previous head */
+ psAlloc->ui32Next = *pui32ListHead;
+ /* make the new alloc point backwards to the previous tail */
+ psAlloc->ui32Prev = psHeadAlloc->ui32Prev;
+
+ /* the head is now our new alloc */
+ *pui32ListHead = ui32Alloc;
+
+ /* the old head now points back to the new head */
+ psHeadAlloc->ui32Prev = *pui32ListHead;
+
+ /* the tail now points forward to the new head */
+ psTailAlloc->ui32Next = ui32Alloc;
+ }
+}
+
+static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+{
+ InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* RemoveAllocationFromList:
+ * Helper function for the allocation list.
+ * Removes the given allocation from the list, whose head is
+ * pointed to by pui32ListHead
+ */
+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ /* if this is the only element in the list then just make the list empty */
+ if((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
+ {
+ *pui32ListHead = END_OF_LIST;
+ }
+ else
+ {
+ RECORD_ALLOCATION *psPrev, *psNext;
+
+ psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
+ psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+
+ /* remove the allocation from the list */
+ psPrev->ui32Next = psAlloc->ui32Next;
+ psNext->ui32Prev = psAlloc->ui32Prev;
+
+ /* if this allocation is the head then update the head */
+ if(*pui32ListHead == ui32Alloc)
+ {
+ *pui32ListHead = psAlloc->ui32Prev;
+ }
+ }
+}
+
+static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+{
+ RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* TouchBusyAllocation:
+ * Move the given allocation to the head of the list
+ */
+static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+{
+ RemoveAllocationFromBusyList(ui32Alloc);
+ InsertAllocationToBusyList(ui32Alloc);
+}
+
+static INLINE IMG_BOOL IsAllocationListEmpty(IMG_UINT32 ui32ListHead)
+{
+ return ui32ListHead == END_OF_LIST;
+}
+
+/* GetOldestBusyAllocation:
+ * Returns the index of the oldest allocation in the MRU list
+ */
+static IMG_UINT32 GetOldestBusyAllocation(void)
+{
+ IMG_UINT32 ui32Alloc;
+ RECORD_ALLOCATION *psAlloc;
+
+ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+ if(ui32Alloc == END_OF_LIST)
+ {
+ return END_OF_LIST;
+ }
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
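+ /* the list is circular, so the head's ui32Prev entry is the tail,
+ * i.e. the least recently used allocation
+ */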
+ return psAlloc->ui32Prev;
+}
+
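+/* GetFreeAllocation:
+ * Returns the index of an allocation record that can be (re)used.
+ * Records are recycled, so this is simply the oldest entry in the MRU list.
+ */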
+static IMG_UINT32 GetFreeAllocation(void)
+{
+ IMG_UINT32 ui32Alloc;
+
+ ui32Alloc = GetOldestBusyAllocation();
+
+ return ui32Alloc;
+}
+
+/* FindAllocation:
+ * Searches the list of allocations and returns the index if an allocation
+ * is found which matches the given properties
+ */
+static IMG_UINT32 FindAllocation(const IMG_CHAR *pszName,
+ IMG_UINT64 ui64Serial,
+ IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize)
+{
+ IMG_UINT32 ui32Head, ui32Index;
+ RECORD_ALLOCATION *psAlloc;
+
+ ui32Head = ui32Index = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+ if(IsAllocationListEmpty(ui32Index))
+ {
+ goto not_found;
+ }
+
+ do
+ {
+ psAlloc = &gsDevicememHistoryData.sRecords.pasAllocations[ui32Index];
+
+ if( (psAlloc->ui64Serial == ui64Serial) &&
+ (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+ (psAlloc->uiSize == uiSize) &&
+ (strcmp(psAlloc->szName, pszName) == 0))
+ {
+ goto found;
+ }
+
+ ui32Index = psAlloc->ui32Next;
+ } while(ui32Index != ui32Head);
+
+not_found:
+ /* not found */
+ ui32Index = END_OF_LIST;
+
+found:
+ /* if the allocation was not found then we return END_OF_LIST.
+ * otherwise, we return the index of the allocation
+ */
+
+ return ui32Index;
+}
+
+/* InitialiseAllocation:
+ * Initialise the given allocation structure with the given properties
+ */
+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
+ const IMG_CHAR *pszName,
+ IMG_UINT64 ui64Serial,
+ IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2PageSize)
+{
+ OSStringNCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
+ psAlloc->szName[sizeof(psAlloc->szName) - 1] = '\0';
+ psAlloc->ui64Serial = ui64Serial;
+ psAlloc->uiPID = uiPID;
+ psAlloc->sDevVAddr = sDevVAddr;
+ psAlloc->uiSize = uiSize;
+ psAlloc->ui32Log2PageSize = ui32Log2PageSize;
+ psAlloc->ui64CreationTime = OSClockns64();
+}
+
+/* CreateAllocation:
+ * Creates a new allocation with the given properties then outputs the
+ * index of the allocation
+ */
+static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName,
+ IMG_UINT64 ui64Serial,
+ IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_BOOL bAutoPurge,
+ IMG_UINT32 *puiAllocationIndex)
+{
+ IMG_UINT32 ui32Alloc;
+ RECORD_ALLOCATION *psAlloc;
+
+ ui32Alloc = GetFreeAllocation();
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+ pszName,
+ ui64Serial,
+ uiPID,
+ sDevVAddr,
+ uiSize,
+ ui32Log2PageSize);
+
+ /* put the newly initialised allocation at the front of the MRU list */
+ TouchBusyAllocation(ui32Alloc);
+
+ *puiAllocationIndex = ui32Alloc;
+
+#if defined(PDUMP)
+ EmitPDumpAllocation(ui32Alloc, psAlloc);
+#endif
+
+ return PVRSRV_OK;
+}
+
+/* MatchAllocation:
+ * Tests if the allocation at the given index matches the supplied properties.
+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
+ */
+static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT64 ui64Serial,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszName,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_PID uiPID)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+
+ return (psAlloc->ui64Serial == ui64Serial) &&
+ (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+ (psAlloc->uiSize == uiSize) &&
+ (psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
+ (strcmp(psAlloc->szName, pszName) == 0);
+}
+
+/* FindOrCreateAllocation:
+ * Convenience function.
+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
+ * this function will look for an existing record of this allocation and
+ * create the allocation if there is no existing record
+ */
+static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint,
+ IMG_UINT64 ui64Serial,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char *pszName,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_PID uiPID,
+ IMG_BOOL bSparse,
+ IMG_UINT32 *pui32AllocationIndexOut,
+ IMG_BOOL *pbCreated)
+{
+ IMG_UINT32 ui32AllocationIndex;
+
+ if(ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
+ {
+ IMG_BOOL bHaveAllocation;
+
+ /* first, try to match against the index given by the client */
+ bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ pszName,
+ ui32Log2PageSize,
+ uiPID);
+ if(bHaveAllocation)
+ {
+ *pbCreated = IMG_FALSE;
+ *pui32AllocationIndexOut = ui32AllocationIndexHint;
+ return PVRSRV_OK;
+ }
+ }
+
+ /* if matching against the client-supplied index fails then check
+ * if the allocation exists in the list
+ */
+ ui32AllocationIndex = FindAllocation(pszName,
+ ui64Serial,
+ uiPID,
+ sDevVAddr,
+ uiSize);
+
+ /* if there is no record of the allocation then we
+ * create it now
+ */
+ if(ui32AllocationIndex == END_OF_LIST)
+ {
+ PVRSRV_ERROR eError;
+ eError = CreateAllocation(pszName,
+ ui64Serial,
+ uiPID,
+ sDevVAddr,
+ uiSize,
+ ui32Log2PageSize,
+ IMG_TRUE,
+ &ui32AllocationIndex);
+
+ if(eError == PVRSRV_OK)
+ {
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+ *pbCreated = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create record for allocation %s",
+ __func__,
+ pszName));
+ }
+
+ return eError;
+ }
+ else
+ {
+ /* found existing record */
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+ *pbCreated = IMG_FALSE;
+ return PVRSRV_OK;
+ }
+
+}
+
+/* GenerateMapUnmapCommandsForSparsePMR:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
+ * current mapping table
+ *
+ * psPMR: The PMR whose mapping table to read.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the PMR's mapping table and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
+ IMG_UINT32 ui32AllocIndex,
+ IMG_BOOL bMap)
+{
+ PMR_MAPPING_TABLE *psMappingTable;
+ IMG_UINT32 ui32DonePages = 0;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 i;
+ IMG_BOOL bInARun = IMG_FALSE;
+ IMG_UINT32 ui32CurrentStart = 0;
+ IMG_UINT32 ui32RunCount = 0;
+
+ psMappingTable = PMR_GetMappigTable(psPMR);
+ ui32NumPages = psMappingTable->ui32NumPhysChunks;
+
+ if(ui32NumPages == 0)
+ {
+ /* nothing to do */
+ return;
+ }
+
+ for(i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+ {
+ if(psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
+ {
+ if(!bInARun)
+ {
+ bInARun = IMG_TRUE;
+ ui32CurrentStart = i;
+ ui32RunCount = 1;
+ }
+ else
+ {
+ ui32RunCount++;
+ }
+ }
+
+ if(bInARun)
+ {
+ /* test if we need to end this current run and generate the command,
+ * either because the next page is not virtually contiguous
+ * to the current page, we have reached the maximum range,
+ * or this is the last page in the mapping table
+ */
+ if((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
+ (ui32RunCount == MAP_RANGE_MAX_RANGE) ||
+ (i == (psMappingTable->ui32NumVirtChunks - 1)))
+ {
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+
+ ui32DonePages += ui32RunCount;
+
+ if(ui32DonePages == ui32NumPages)
+ {
+ break;
+ }
+
+ bInARun = IMG_FALSE;
+ }
+ }
+ }
+
+}
+
+/* GenerateMapUnmapCommandsForChangeList:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
+ * list of page change (page map or page unmap) indices given.
+ *
+ * ui32NumPages: Number of pages which have changed.
+ * pui32PageList: List of indices of the pages which have changed.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the list and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages,
+ IMG_UINT32 *pui32PageList,
+ IMG_UINT32 ui32AllocIndex,
+ IMG_BOOL bMap)
+{
+ IMG_UINT32 i;
+ IMG_BOOL bInARun = IMG_FALSE;
+ IMG_UINT32 ui32CurrentStart = 0;
+ IMG_UINT32 ui32RunCount = 0;
+
+ for(i = 0; i < ui32NumPages; i++)
+ {
+ if(!bInARun)
+ {
+ bInARun = IMG_TRUE;
+ ui32CurrentStart = pui32PageList[i];
+ }
+
+ ui32RunCount++;
+
+ /* we flush if:
+ * - the next page in the list is not one greater than the current page
+ * - this is the last page in the list
+ * - we have reached the maximum range size
+ */
+ if((i == (ui32NumPages - 1)) ||
+ ((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
+ (ui32RunCount == MAP_RANGE_MAX_RANGE))
+ {
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+
+ bInARun = IMG_FALSE;
+ ui32RunCount = 0;
+ }
+ }
+}
+
+/* DevicememHistoryMapNewKM:
+ * Entry point for when an allocation is mapped into the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryMapNewKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ bSparse,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ if(!bSparse)
+ {
+ InsertMapAllCommand(ui32AllocationIndex);
+ }
+ else
+ {
+ GenerateMapUnmapCommandsForSparsePMR(psPMR,
+ ui32AllocationIndex,
+ IMG_TRUE);
+ }
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ const IMG_CHAR *pszName)
+{
+ while(ui32NumPages > 0)
+ {
+ IMG_UINT32 ui32PagesToAdd;
+
+ ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);
+
+ if(ui32StartPage > MAP_RANGE_MAX_START)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
+ "%u on allocation %s",
+ bMap ? "map" : "unmap",
+ ui32StartPage,
+ pszName));
+ return;
+ }
+
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocationIndex,
+ ui32StartPage,
+ ui32PagesToAdd);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocationIndex,
+ ui32StartPage,
+ ui32PagesToAdd);
+ }
+
+ ui32StartPage += ui32PagesToAdd;
+ ui32NumPages -= ui32PagesToAdd;
+ }
+}
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ 0,
+ sBaseDevVAddr,
+ uiAllocSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_FALSE,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ VRangeInsertMapUnmapCommands(IMG_TRUE,
+ ui32AllocationIndex,
+ sBaseDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ szName);
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+
+}
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ 0,
+ sBaseDevVAddr,
+ uiAllocSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_FALSE,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ VRangeInsertMapUnmapCommands(IMG_FALSE,
+ ui32AllocationIndex,
+ sBaseDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ szName);
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+/* DevicememHistoryUnmapNewKM:
+ * Entry point for when an allocation is unmapped from the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryUnmapNewKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ bSparse,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ if(!bSparse)
+ {
+ InsertUnmapAllCommand(ui32AllocationIndex);
+ }
+ else
+ {
+ GenerateMapUnmapCommandsForSparsePMR(psPMR,
+ ui32AllocationIndex,
+ IMG_FALSE);
+ }
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+/* DevicememHistorySparseChangeKM:
+ * Entry point for when a sparse allocation is changed, such that some of the
+ * pages within the sparse allocation are mapped or unmapped.
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocPageCount: Number of pages which have been mapped.
+ * paui32AllocPageIndices: Indices of pages which have been mapped.
+ * ui32FreePageCount: Number of pages which have been unmapped.
+ * paui32FreePageIndices: Indices of pages which have been unmapped.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *paui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_TRUE /* bSparse */,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32AllocationIndex,
+ IMG_TRUE);
+
+ GenerateMapUnmapCommandsForChangeList(ui32FreePageCount,
+ paui32FreePageIndices,
+ ui32AllocationIndex,
+ IMG_FALSE);
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+
+}
+
+/* CircularBufferIterateStart:
+ * Initialise local state for iterating over the circular buffer
+ */
+static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+{
+ *pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+
+ if(*pui32Head != 0)
+ {
+ *pui32Iter = *pui32Head - 1;
+ }
+ else
+ {
+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+ }
+}
+
+/* CircularBufferIteratePrevious:
+ * Iterate to the previous item in the circular buffer.
+ * This is called repeatedly to iterate over the whole circular buffer.
+ */
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+ IMG_UINT32 *pui32Iter,
+ COMMAND_TYPE *peType,
+ IMG_BOOL *pbLast)
+{
+ IMG_UINT8 *pui8Header;
+ COMMAND_WRAPPER *psOut = NULL;
+
+ psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+
+ pui8Header = (IMG_UINT8 *) psOut;
+
+ /* sanity check the command looks valid.
+ * this condition should never happen, but check for it anyway
+ * and try to handle it
+ */
+ if(*pui8Header >= COMMAND_TYPE_COUNT)
+ {
+ /* invalid header detected. Circular buffer corrupted? */
+ PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
+ "Invalid header: %u",
+ *pui8Header));
+ *pbLast = IMG_TRUE;
+ return NULL;
+ }
+
+ *peType = *pui8Header;
+
+ if(*pui32Iter != 0)
+ {
+ (*pui32Iter)--;
+ }
+ else
+ {
+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+ }
+
+
+ /* inform the caller this is the last command if either we have reached
+ * the head (where we started) or if we have reached an empty command,
+ * which means we have covered all populated entries
+ */
+ if((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
+ {
+ /* this is the final iteration */
+ *pbLast = IMG_TRUE;
+ }
+
+ return psOut;
+}
+
+/* MapUnmapCommandGetInfo:
+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
+ * MAP_RANGE or UNMAP_RANGE command
+ */
+static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+ COMMAND_TYPE eType,
+ IMG_DEV_VIRTADDR *psDevVAddrStart,
+ IMG_DEV_VIRTADDR *psDevVAddrEnd,
+ IMG_BOOL *pbMap,
+ IMG_UINT32 *pui32AllocIndex)
+{
+ if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+ {
+ COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
+ RECORD_ALLOCATION *psAlloc;
+
+ *pbMap = (eType == COMMAND_TYPE_MAP_ALL);
+ *pui32AllocIndex = psMapAll->uiAllocIndex;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+
+ *psDevVAddrStart = psAlloc->sDevVAddr;
+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
+ }
+ else if((eType == COMMAND_TYPE_MAP_RANGE) || (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
+ RECORD_ALLOCATION *psAlloc;
+ IMG_UINT32 ui32StartPage, ui32Count;
+
+ *pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
+ *pui32AllocIndex = psMapRange->uiAllocIndex;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+
+ MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
+
+ psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
+ ((1U << psAlloc->ui32Log2PageSize) * ui32StartPage);
+
+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
+ ((1U << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
+ __func__,
+ eType));
+ }
+}
+
+/* DevicememHistoryQuery:
+ * Entry point for rgxdebug to look up addresses relating to a page fault
+ */
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+ IMG_UINT32 ui32PageSizeBytes,
+ IMG_BOOL bMatchAnyAllocInPage)
+{
+ IMG_UINT32 ui32Head, ui32Iter;
+ COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+ COMMAND_WRAPPER *psCommand = NULL;
+ IMG_BOOL bLast = IMG_FALSE;
+ IMG_UINT64 ui64StartTime = OSClockns64();
+ IMG_UINT64 ui64TimeNs = 0;
+
+ /* initialise the results count for the caller */
+ psQueryOut->ui32NumResults = 0;
+
+ DevicememHistoryLock();
+
+ /* if the search is constrained to a particular PID then we
+ * first search the list of allocations to see if this
+ * PID is known to us
+ */
+ if(psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
+ {
+ IMG_UINT32 ui32Alloc;
+ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+ while(ui32Alloc != END_OF_LIST)
+ {
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ if(psAlloc->uiPID == psQueryIn->uiPID)
+ {
+ goto found_pid;
+ }
+
+ /* advance to the next allocation in the circular list */
+ ui32Alloc = psAlloc->ui32Next;
+
+ if(ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+ {
+ /* gone through whole list */
+ break;
+ }
+ }
+
+ /* PID not found, so we do not have any suitable data for this
+ * page fault
+ */
+ goto out_unlock;
+ }
+
+found_pid:
+
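+ /* walk the circular buffer backwards from the most recent command, so
+ * matching results are reported newest first
+ */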
+ CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+ while(!bLast)
+ {
+ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+ if(eType == COMMAND_TYPE_TIMESTAMP)
+ {
+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+ continue;
+ }
+
+ if((eType == COMMAND_TYPE_MAP_ALL) ||
+ (eType == COMMAND_TYPE_UNMAP_ALL) ||
+ (eType == COMMAND_TYPE_MAP_RANGE) ||
+ (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ RECORD_ALLOCATION *psAlloc;
+ IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig;
+ IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr;
+ IMG_BOOL bMap;
+ IMG_UINT32 ui32AllocIndex;
+
+ MapUnmapCommandGetInfo(psCommand,
+ eType,
+ &sAllocStartAddrOrig,
+ &sAllocEndAddrOrig,
+ &bMap,
+ &ui32AllocIndex);
+
+ sAllocStartAddr = sAllocStartAddrOrig;
+ sAllocEndAddr = sAllocEndAddrOrig;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+ /* skip this command if we need to search within
+ * a particular PID, and this allocation is not from
+ * that PID
+ */
+ if((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) &&
+ (psAlloc->uiPID != psQueryIn->uiPID))
+ {
+ continue;
+ }
+
+ /* if the allocation was created after this event, then this
+ * event must be for an old/removed allocation, so skip it
+ */
+ if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+ {
+ continue;
+ }
+
+ /* if the caller wants us to match any allocation in the
+ * same page as the allocation then tweak the real start/end
+ * addresses of the allocation here
+ */
+ if(bMatchAnyAllocInPage)
+ {
+ sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+ sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+ }
+
+ if((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
+ (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr))
+ {
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+ OSStringNCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString));
+ psResult->szString[DEVICEMEM_HISTORY_TEXT_BUFSZ - 1] = '\0';
+ psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+ psResult->uiSize = psAlloc->uiSize;
+ psResult->bMap = bMap;
+ psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK);
+ psResult->ui64When = ui64TimeNs;
+ /* write the responsible PID in the placeholder */
+ psResult->sProcessInfo.uiPID = psAlloc->uiPID;
+
+ if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+ {
+ psResult->bRange = IMG_FALSE;
+ psResult->bAll = IMG_TRUE;
+ }
+ else
+ {
+ psResult->bRange = IMG_TRUE;
+ MapRangeUnpack(&psCommand->u.sMapRange,
+ &psResult->ui32StartPage,
+ &psResult->ui32PageCount);
+ psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize))
+ == psAlloc->uiSize;
+ psResult->sMapStartAddr = sAllocStartAddrOrig;
+ psResult->sMapEndAddr = sAllocEndAddrOrig;
+ }
+
+ psQueryOut->ui32NumResults++;
+
+ if(psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS)
+ {
+ break;
+ }
+ }
+ }
+ }
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return psQueryOut->ui32NumResults > 0;
+}
+
+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
+ IMG_PID uiPID,
+ const IMG_CHAR *pszName,
+ const IMG_CHAR *pszAction,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT64 ui64TimeNs)
+{
+
+ szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN - 1] = '\0';
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ /* PID NAME ACTION ADDR MIN-ADDR MAX SIZE ABS NS */
+ "%04u %-40s %-10s "
+ IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " "
+ "0x%08llX "
+ "%013llu", /* 13 digits is over 2 hours of ns */
+ uiPID,
+ pszName,
+ pszAction,
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr,
+ sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr,
+ ui64TimeNs);
+}
+
+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ "%-4s %-40s %-10s %10s %10s %8s %13s",
+ "PID",
+ "NAME",
+ "ACTION",
+ "ADDR MIN",
+ "ADDR MAX",
+ "SIZE",
+ "ABS NS");
+}
+
+static const char *CommandTypeToString(COMMAND_TYPE eType)
+{
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_ALL:
+ return "MapAll";
+ case COMMAND_TYPE_UNMAP_ALL:
+ return "UnmapAll";
+ case COMMAND_TYPE_MAP_RANGE:
+ return "MapRange";
+ case COMMAND_TYPE_UNMAP_RANGE:
+ return "UnmapRange";
+ case COMMAND_TYPE_TIMESTAMP:
+ return "TimeStamp";
+ default:
+ return "???";
+ }
+}
+
+static void DevicememHistoryPrintAll(void *pvFilePtr, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ IMG_UINT32 ui32Iter;
+ IMG_UINT32 ui32Head;
+ IMG_BOOL bLast = IMG_FALSE;
+ IMG_UINT64 ui64TimeNs = 0;
+ IMG_UINT64 ui64StartTime = OSClockns64();
+
+ DeviceMemHistoryFmtHeader(szBuffer);
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+ CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+ while(!bLast)
+ {
+ COMMAND_WRAPPER *psCommand;
+ COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+
+ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+ if(eType == COMMAND_TYPE_TIMESTAMP)
+ {
+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+ continue;
+ }
+
+
+ if((eType == COMMAND_TYPE_MAP_ALL) ||
+ (eType == COMMAND_TYPE_UNMAP_ALL) ||
+ (eType == COMMAND_TYPE_MAP_RANGE) ||
+ (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ RECORD_ALLOCATION *psAlloc;
+ IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd;
+ IMG_BOOL bMap;
+ IMG_UINT32 ui32AllocIndex;
+
+ MapUnmapCommandGetInfo(psCommand,
+ eType,
+ &sDevVAddrStart,
+ &sDevVAddrEnd,
+ &bMap,
+ &ui32AllocIndex);
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+ if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+ {
+ /* if this event relates to an allocation we
+ * are no longer tracking then do not print it
+ */
+ continue;
+ }
+
+ DeviceMemHistoryFmt(szBuffer,
+ psAlloc->uiPID,
+ psAlloc->szName,
+ CommandTypeToString(eType),
+ sDevVAddrStart,
+ sDevVAddrEnd,
+ ui64TimeNs);
+
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+ }
+ }
+
+ pfnOSStatsPrintf(pvFilePtr, "\nTimestamp reference: %013llu\n", ui64StartTime);
+}
+
+static void DevicememHistoryPrintAllWrapper(void *pvFilePtr, void *pvData, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ DevicememHistoryLock();
+ DevicememHistoryPrintAll(pvFilePtr, pfnOSStatsPrintf);
+ DevicememHistoryUnlock();
+}
+
+static PVRSRV_ERROR CreateRecords(void)
+{
+ gsDevicememHistoryData.sRecords.pasAllocations =
+ OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+
+ if(gsDevicememHistoryData.sRecords.pasAllocations == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ gsDevicememHistoryData.sRecords.pasCircularBuffer =
+ OSAllocMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+
+ if(gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+ {
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
+
+static void DestroyRecords(void)
+{
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+}
+
+static void InitialiseRecords(void)
+{
+ IMG_UINT32 i;
+
+ /* initialise the allocations list */
+
+ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+
+ for(i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
+ {
+ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
+ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+ }
+
+ gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+
+ gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+
+ /* initialise the circular buffer with zeros so every command
+ * is initialised as a command of type COMMAND_TYPE_NONE
+ */
+ OSCachedMemSet(gsDevicememHistoryData.sRecords.pasCircularBuffer,
+ COMMAND_TYPE_NONE,
+ sizeof(gsDevicememHistoryData.sRecords.pasCircularBuffer[0]) * CIRCULAR_BUFFER_NUM_COMMANDS);
+}
+
+PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&gsDevicememHistoryData.hLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create lock"));
+ goto err_lock;
+ }
+
+ eError = CreateRecords();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create records"));
+ goto err_allocations;
+ }
+
+ InitialiseRecords();
+
+ gsDevicememHistoryData.pvStatsEntry = OSCreateStatisticEntry("devicemem_history",
+ NULL,
+ DevicememHistoryPrintAllWrapper,
+ NULL,
+ NULL,
+ NULL);
+
+ return PVRSRV_OK;
+
+err_allocations:
+ OSLockDestroy(gsDevicememHistoryData.hLock);
+err_lock:
+ return eError;
+}
+
+void DevicememHistoryDeInitKM(void)
+{
+ if(gsDevicememHistoryData.pvStatsEntry != NULL)
+ {
+ OSRemoveStatisticEntry(gsDevicememHistoryData.pvStatsEntry);
+ }
+
+ DestroyRecords();
+
+ OSLockDestroy(gsDevicememHistoryData.hLock);
+}
+
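+/* DevicememHistoryMapKM:
+ * Simple entry point which records a map of the whole virtual range,
+ * assuming a 4K page size; wraps DevicememHistoryMapVRangeKM().
+ */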
+PVRSRV_ERROR DevicememHistoryMapKM(IMG_DEV_VIRTADDR sDevVAddr, size_t uiSize, const char szString[DEVICEMEM_HISTORY_TEXT_BUFSZ])
+{
+ IMG_UINT32 ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+
+ /* assume 4K page size */
+ ui32Log2PageSize = 12;
+
+ ui32StartPage = 0;
+ ui32NumPages = (uiSize + 4095) / 4096;
+
+ return DevicememHistoryMapVRangeKM(sDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ uiSize,
+ szString,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ &ui32AllocationIndex);
+}
+
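+/* DevicememHistoryUnmapKM:
+ * Simple entry point which records an unmap of the whole virtual range,
+ * assuming a 4K page size; wraps DevicememHistoryUnmapVRangeKM().
+ */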
+PVRSRV_ERROR DevicememHistoryUnmapKM(IMG_DEV_VIRTADDR sDevVAddr, size_t uiSize, const char szString[DEVICEMEM_HISTORY_TEXT_BUFSZ])
+{
+ IMG_UINT32 ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+
+ /* assume 4K page size */
+ ui32Log2PageSize = 12;
+
+ ui32StartPage = 0;
+ ui32NumPages = (uiSize + 4095) / 4096;
+
+ return DevicememHistoryUnmapVRangeKM(sDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ uiSize,
+ szString,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ &ui32AllocationIndex);
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File devicemem_history_server.h
+@Title Resource Information abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Devicemem History functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_HISTORY_SERVER_H_
+#define _DEVICEMEM_HISTORY_SERVER_H_
+
+#include "img_defs.h"
+#include "mm_common.h"
+#include "pvrsrv_error.h"
+#include "rgxmem.h"
+
+extern PVRSRV_ERROR
+DevicememHistoryInitKM(void);
+
+extern void
+DevicememHistoryDeInitKM(void);
+
+extern PVRSRV_ERROR
+DevicememHistoryMapKM(IMG_DEV_VIRTADDR sDevVAddr, size_t uiSize, const char szText[DEVICEMEM_HISTORY_TEXT_BUFSZ]);
+
+extern PVRSRV_ERROR
+DevicememHistoryUnmapKM(IMG_DEV_VIRTADDR sDevVAddr, size_t uiSize, const char szText[DEVICEMEM_HISTORY_TEXT_BUFSZ]);
+
+
+PVRSRV_ERROR DevicememHistoryMapNewKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapNewKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *paui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+/* used when the PID does not matter */
+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+ IMG_PID uiPID;
+ IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* Store up to 4 results for a lookup. If the faulting page was re-mapped
+ * between the page fault occurring on the hardware and the page fault
+ * analysis being carried out, the second result entry will show the
+ * allocation being unmapped. A further 2 entries are added to cater for
+ * multiple buffers in the same page.
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+ IMG_CHAR szString[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ size_t uiSize;
+ IMG_BOOL bMap;
+ IMG_BOOL bRange;
+ IMG_BOOL bAll;
+ IMG_UINT64 ui64When;
+ IMG_UINT64 ui64Age;
+ /* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */
+ IMG_UINT32 ui32StartPage;
+ IMG_UINT32 ui32PageCount;
+ IMG_DEV_VIRTADDR sMapStartAddr;
+ IMG_DEV_VIRTADDR sMapEndAddr;
+ RGXMEM_PROCESS_INFO sProcessInfo;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+ IMG_UINT32 ui32NumResults;
+ /* result 0 is the newest */
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+extern IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+ IMG_UINT32 ui32PageSizeBytes,
+ IMG_BOOL bMatchAnyAllocInPage);
+
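+/* Illustrative usage sketch (editor's addition, not part of the original
+ * driver): how a fault handler might look up the allocation history for a
+ * faulting device virtual address. The PID, faulting address and page size
+ * are assumed to be supplied by the surrounding fault-analysis code.
+ */
+#if 0
+static void ExampleHistoryLookup(IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sFaultAddr,
+ IMG_UINT32 ui32PageSizeBytes)
+{
+ DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+ DEVICEMEM_HISTORY_QUERY_OUT sQueryOut;
+ IMG_UINT32 i;
+
+ sQueryIn.uiPID = uiPID; /* or DEVICEMEM_HISTORY_PID_ANY */
+ sQueryIn.sDevVAddr = sFaultAddr;
+
+ if (DevicememHistoryQuery(&sQueryIn, &sQueryOut,
+ ui32PageSizeBytes, IMG_TRUE))
+ {
+ /* Result 0 is the newest entry */
+ for (i = 0; i < sQueryOut.ui32NumResults; i++)
+ {
+ /* sResults[i].szString names the allocation; bMap indicates
+ * whether the entry records a map or an unmap operation. */
+ }
+ }
+}
+#endif
+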
+#endif /* _DEVICEMEM_HISTORY_SERVER_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory History shared definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared (client/server) definitions related to the Devicemem History
+ functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_HISTORY_SHARED_H
+#define DEVICEMEM_HISTORY_SHARED_H
+
+/* structure used inside MEMDESC to hold the allocation name until
+ * the allocation is unmapped
+ */
+typedef struct _DEVICEMEM_HISTORY_MEMDESC_DATA_
+{
+ IMG_CHAR szText[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+ IMG_UINT32 ui32AllocationIndex;
+} DEVICEMEM_HISTORY_MEMDESC_DATA;
+
+#endif /* DEVICEMEM_HISTORY_SHARED_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Shared device memory management PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common (client & server) PDump functions for the
+ memory management code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pdump.h"
+#include "devicemem.h"
+#include "devicemem_utils.h"
+#include "devicemem_pdump.h"
+#include "client_pdumpmm_bridge.h"
+#if defined(LINUX) && !defined(__KERNEL__)
+#include <stdio.h>
+#if defined(SUPPORT_ANDROID_PLATFORM)
+#include "android_utils.h"
+#endif
+#endif
+
+IMG_INTERNAL void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+ eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+ eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_TRUE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpLoadMemValue32(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui32Value,
+ uiPDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpLoadMemValue64(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui64Value,
+ uiPDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* FIXME: This should be server side only */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[100];
+ IMG_CHAR aszSymbolicName[100];
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ *puiMemOffset += psMemDesc->uiOffset;
+
+ eError = BridgePMRPDumpSymbolicAddr(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ *puiMemOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ puiMemOffset,
+ &uiNextSymName);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]);
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpSaveToFile(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ OSStringLength(pszFilename) + 1,
+ pszFilename,
+ uiFileOffset);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+
+/* FIXME: Remove? */
+IMG_INTERNAL void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevAddrStart;
+
+ sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+ sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+ sDevAddrStart.uiAddr += uiOffset;
+
+ eError = BridgeDevmemIntPDumpSaveToFileVirtual(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+ sDevAddrStart,
+ uiSize,
+ OSStringLength(pszFilename) + 1,
+ pszFilename,
+ ui32FileOffset,
+ ui32PdumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiNumBytes;
+
+ uiNumBytes = 4;
+
+ if (psMemDesc->uiOffset + uiOffset + uiNumBytes >= psMemDesc->psImport->uiSize)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ eError = BridgePMRPDumpPol32(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ eError = BridgePMRPDumpCBP(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#endif /* PDUMP */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management PDump internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to PDump device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_PDUMP_H_
+#define _DEVICEMEM_PDUMP_H_
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpLoadMem()
+ *
+ * Takes a memory descriptor, offset and size, writes the current contents
+ * of the memory at that location to the PDump parameter (PRM) file, and
+ * emits a PDump LDB command to load the data back from that file. The
+ * intention is that, on PDump playback, the contents of the simulated
+ * buffer match the contents at the time this command was run, enabling
+ * PDump capture of cases where the memory has been modified externally,
+ * i.e. by the host CPU or by a third party.
+ */
+extern void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
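+
+/* Illustrative usage sketch (editor's addition, not part of the original
+ * header): capturing the current CPU-written contents of an allocation into
+ * the PDump parameter stream. psMemDesc, uiSize and uiPDumpFlags are assumed
+ * to be supplied by the caller.
+ */
+#if 0
+static void ExampleCaptureBuffer(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ /* Emit an LDB covering the whole allocation, starting at offset 0 */
+ DevmemPDumpLoadMem(psMemDesc, 0, uiSize, uiPDumpFlags);
+}
+#endif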
+
+/*
+ * DevmemPDumpLoadZeroMem()
+ *
+ * As DevmemPDumpLoadMem(), but the PDump allocation will be populated with
+ * zeros from the zero page in the parameter stream.
+ */
+extern void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue32()
+ *
+ * As above, but dumps a 32-bit value at a dword-aligned address in plain
+ * text to the PDump script2 file. Useful for patching a buffer at PDump
+ * playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved with the functions above, but
+ * the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue64()
+ *
+ * As above, but dumps a 64-bit value at a dword-aligned address in plain
+ * text to the PDump script2 file. Useful for patching a buffer at PDump
+ * playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved with the functions above, but
+ * the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented
+ * by an offset into the mem descriptor.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * emits a pdump SAB to cause the current contents of the memory to be
+ * written to the given file during playback
+ */
+extern void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset);
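+
+/* Illustrative usage sketch (editor's addition): emitting a SAB so that, on
+ * playback, the contents of the allocation are written out to a capture
+ * file. The filename "dump.bin" is purely hypothetical.
+ */
+#if 0
+static void ExampleSaveBuffer(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_SIZE_T uiSize)
+{
+ DevmemPDumpSaveToFile(psMemDesc, 0, uiSize, "dump.bin", 0);
+}
+#endif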
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the
+ * virtual address and device MMU context to cause the pdump player to
+ * traverse the MMU page tables itself.
+ */
+extern void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ * DevmemPDumpDevmemPol32()
+ *
+ * Writes a PDump 'POL' command to wait for a masked 32-bit memory
+ * location to become the specified value.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in a circular buffer. Reads the read offset
+ * from memory and waits until there is enough space to write
+ * the packet.
+ *
+ * psMemDesc - MemDesc which contains the read offset
+ * uiReadOffset - Offset into the MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize - Size of the packet to write
+ * uiBufferSize - Size of the circular buffer
+ */
+extern PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
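+
+/* Illustrative usage sketch (editor's addition): waiting for a 32-bit word
+ * at the start of an allocation to become 1, then claiming space in a
+ * circular buffer whose read offset lives at the same location. The poll
+ * operator, PDump flags and buffer geometry are assumed to be supplied by
+ * the caller.
+ */
+#if 0
+static PVRSRV_ERROR ExamplePolThenClaimSpace(const DEVMEM_MEMDESC *psMemDesc,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+
+ /* POL: playback waits until the masked 32-bit word at offset 0
+ * compares against 1 using the given operator */
+ eError = DevmemPDumpDevmemPol32(psMemDesc, 0, 1, 0xFFFFFFFF,
+ eOperator, uiPDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* CBP: playback waits until the circular buffer (read offset stored
+ * at offset 0 of psMemDesc) has room for the packet */
+ return DevmemPDumpCBP(psMemDesc, 0, uiWriteOffset,
+ uiPacketSize, uiBufferSize);
+}
+#endif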
+
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui64Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /* _DEVICEMEM_PDUMP_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Server-side component of the Device Memory Management.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "physmem.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include <linux/sched.h>
+#include "pvr_buffer_sync.h"
+#endif
+
+struct _DEVMEMINT_CTX_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /* MMU common code needs to have a context. There's a one-to-one
+ correspondence between device memory context and MMU context,
+ but we have the abstraction here so that we don't need to care
+ what the MMU does with its context, and the MMU code need not
+ know about us at all. */
+ MMU_CONTEXT *psMMUContext;
+
+ ATOMIC_T hRefCount;
+
+ /* This handle is for devices that require notification when a new
+ memory context is created and they need to store private data that
+ is associated with the context. */
+ IMG_HANDLE hPrivData;
+
+ /* The following tracks UM applications that need to be notified of a
+ * page fault */
+ DLLIST_NODE sProcessNotifyListHead;
+ /* The following is a node for the list of registered devmem contexts */
+ DLLIST_NODE sPageFaultNotifyListElem;
+};
+
+struct _DEVMEMINT_CTX_EXPORT_
+{
+ DEVMEMINT_CTX *psDevmemCtx;
+ PMR *psPMR;
+ ATOMIC_T hRefCount;
+ DLLIST_NODE sNode;
+};
+
+struct _DEVMEMINT_HEAP_
+{
+ struct _DEVMEMINT_CTX_ *psDevmemCtx;
+ IMG_UINT32 uiLog2PageSize;
+ ATOMIC_T hRefCount;
+};
+
+struct _DEVMEMINT_RESERVATION_
+{
+ struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+ IMG_DEV_VIRTADDR sBase;
+ IMG_DEVMEM_SIZE_T uiLength;
+};
+
+struct _DEVMEMINT_MAPPING_
+{
+ struct _DEVMEMINT_RESERVATION_ *psReservation;
+ PMR *psPMR;
+ IMG_UINT32 uiNumPages;
+};
+
+struct _DEVMEMINT_PF_NOTIFY_
+{
+ IMG_UINT32 ui32PID;
+ DLLIST_NODE sProcessNotifyListElem;
+};
+
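+/* Illustrative lifecycle sketch (editor's addition, not part of the original
+ * driver): the typical order in which the objects above are created and torn
+ * down via the bridge. Error checking is elided and all inputs (connection,
+ * device node, heap geometry, PMR and flags) are assumed to be supplied by
+ * the caller.
+ */
+#if 0
+static void ExampleDevmemServerLifecycle(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR sHeapBase,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags)
+{
+ DEVMEMINT_CTX *psCtx;
+ DEVMEMINT_HEAP *psHeap;
+ DEVMEMINT_RESERVATION *psReservation;
+ DEVMEMINT_MAPPING *psMapping;
+ IMG_HANDLE hPrivData;
+ IMG_UINT32 ui32CPUCacheLineSize;
+
+ /* Context -> heap -> reservation -> mapping, in that order */
+ DevmemIntCtxCreate(psConnection, psDeviceNode, IMG_FALSE,
+ &psCtx, &hPrivData, &ui32CPUCacheLineSize);
+ DevmemIntHeapCreate(psCtx, sHeapBase, uiHeapLength,
+ uiLog2PageSize, &psHeap);
+ DevmemIntReserveRange(psHeap, sDevVAddr, uiSize, &psReservation);
+ DevmemIntMapPMR(psHeap, psReservation, psPMR, uiMapFlags, &psMapping);
+
+ /* Teardown happens in the reverse order */
+ DevmemIntUnmapPMR(psMapping);
+ DevmemIntUnreserveRange(psReservation);
+ DevmemIntHeapDestroy(psHeap);
+ DevmemIntCtxDestroy(psCtx);
+}
+#endif
+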
+/*************************************************************************/ /*!
+@Function _DevmemIntCtxAcquire
+@Description Acquire a reference to the provided device memory context.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+ OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntCtxRelease
+@Description Release the reference to the provided device memory context.
+ If this is the last reference which was taken then the
+ memory context will be freed.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+ if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+ {
+ /* The last reference has gone, destroy the context */
+ PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+ DLLIST_NODE *psNode, *psNodeNext;
+
+ /* If there are any PIDs registered for page fault notification,
+ * loop through them and free each entry */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ DEVMEMINT_PF_NOTIFY *psNotifyNode =
+ IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+ dllist_remove_node(psNode);
+ OSFreeMem(psNotifyNode);
+ }
+
+ /* If this context is in the list registered for a debugger, remove
+ * from that list */
+ if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
+ {
+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+ }
+
+ if (psDevNode->pfnUnregisterMemoryContext)
+ {
+ psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+ }
+ MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", __FUNCTION__, psDevmemCtx));
+ OSFreeMem(psDevmemCtx);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntHeapAcquire
+@Description Acquire a reference to the provided device memory heap.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ OSAtomicIncrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntHeapRelease
+@Description Release a reference to the provided device memory heap.
+ The heap itself is freed by DevmemIntHeapDestroy(), not
+ here.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ OSAtomicDecrement(&psDevmemHeap->hRefCount);
+}
+
+PVRSRV_ERROR
+DevmemIntUnpin(PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Unpin */
+ eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = PMRUnpinPMR(psPMR, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e_exit;
+ }
+
+ /* Invalidate mapping */
+ eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ psDevmemMapping->psReservation->sBase,
+ psDevmemMapping->uiNumPages,
+ psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE, /* !< Choose to invalidate PT entries */
+ psPMR);
+
+e_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPin(PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Start the pinning */
+ eError = PMRPinPMR(psPMR);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
+ IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
+
+ /* Start the pinning */
+ eError = PMRPinPMR(psPMR);
+
+ if (eError == PVRSRV_OK)
+ {
+ /* Make mapping valid again */
+ eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ psDevmemMapping->psReservation->sBase,
+ psDevmemMapping->uiNumPages,
+ uiLog2PageSize,
+ IMG_TRUE, /* !< Choose to make PT entries valid again */
+ psPMR);
+ }
+ else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ {
+ /* If we lost the physical backing we have to map it again because
+ * the old physical addresses are no longer valid. */
+ IMG_UINT32 uiFlags;
+ uiFlags = PMR_Flags(psPMR);
+
+ eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ psDevmemMapping->psReservation->sBase,
+ psPMR,
+ 0,
+ psDevmemMapping->uiNumPages,
+ NULL,
+ uiLog2PageSize);
+ }
+
+ /* Just overwrite eError if the mappings failed.
+ * PMR_NEW_MEMORY has to be propagated to the user. */
+ if (eErrorMMU != PVRSRV_OK)
+ {
+ eError = eErrorMMU;
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemServerGetImportHandle
+@Description For a given exportable memory descriptor, returns the PMR handle.
+@Return PVRSRV_OK if the memory is exportable, otherwise a
+ PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ goto e0;
+ }
+
+ *phImport = psMemDesc->psImport->hPMR;
+ return PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemServerGetHeapHandle
+@Description For a given reservation, returns the heap handle.
+@Return PVRSRV_OK on success, otherwise a PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+ IMG_HANDLE *phHeap)
+{
+ *phHeap = psReservation->psDevmemHeap;
+ return PVRSRV_OK;
+}
+
+
+
+/*************************************************************************/ /*!
+@Function DevmemIntCtxCreate
+@Description Creates and initialises a device memory context.
+@Return valid Device Memory context handle - Success
+ PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bKernelMemoryCtx,
+ DEVMEMINT_CTX **ppsDevmemCtxPtr,
+ IMG_HANDLE *hPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize
+ )
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtx;
+ IMG_HANDLE hPrivDataInt = NULL;
+ MMU_DEVICEATTRIBS *psMMUDevAttrs;
+
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ psMMUDevAttrs = bKernelMemoryCtx ? psDeviceNode->psFirmwareMMUDevAttrs :
+ psDeviceNode->psMMUDevAttrs;
+ }
+ else
+ {
+ psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+ PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+ }
+
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s", __FUNCTION__));
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* allocate a Devmem context */
+ psDevmemCtx = OSAllocMem(sizeof *psDevmemCtx);
+ if (psDevmemCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+ goto fail_alloc;
+ }
+
+ OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+ psDevmemCtx->psDevNode = psDeviceNode;
+
+ /* Call down to MMU context creation */
+
+ eError = MMU_ContextCreate(psDeviceNode,
+ &psDevmemCtx->psMMUContext,
+ psMMUDevAttrs);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: MMU_ContextCreate failed", __FUNCTION__));
+ goto fail_mmucontext;
+ }
+
+
+ if (psDeviceNode->pfnRegisterMemoryContext)
+ {
+ eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register MMU context", __FUNCTION__));
+ goto fail_register;
+ }
+ }
+
+ /* Store the private data as it is required to unregister the memory context */
+ psDevmemCtx->hPrivData = hPrivDataInt;
+ *hPrivData = hPrivDataInt;
+ *ppsDevmemCtxPtr = psDevmemCtx;
+
+ /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/
+ *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+
+ /* Initialise the PID notify list */
+ dllist_init(&(psDevmemCtx->sProcessNotifyListHead));
+ psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL;
+ psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL;
+
+ return PVRSRV_OK;
+
+fail_register:
+ MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+ OSFreeMem(psDevmemCtx);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntHeapCreate
+@Description Creates and initialises a device memory heap.
+@Return valid Device Memory heap handle - Success
+ PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(
+ DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 uiLog2DataPageSize,
+ DEVMEMINT_HEAP **ppsDevmemHeapPtr
+ )
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemHeap;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: DevmemIntHeap_Create", __FUNCTION__));
+
+ /* allocate a Devmem heap */
+ psDevmemHeap = OSAllocMem(sizeof *psDevmemHeap);
+ if (psDevmemHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+ goto fail_alloc;
+ }
+
+ psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+ _DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+ OSAtomicWrite(&psDevmemHeap->hRefCount, 1);
+
+ psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+
+ *ppsDevmemHeapPtr = psDevmemHeap;
+
+ return PVRSRV_OK;
+
+fail_alloc:
+ return eError;
+}
+
+static PVRSRV_ERROR DevmemIntAllocDummyPage(DEVMEMINT_HEAP *psDevmemHeap)
+{
+
+ IMG_UINT32 ui32Dummyref = 0;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+ /* The refcount will never approach 4G sparse PMRs, so a 32-bit counter
+ * is sufficient. This function also relies on being called under the
+ * global lock and the PMR lock, and is therefore safe from re-entrancy. */
+ ui32Dummyref = OSAtomicIncrement(&psDevNode->sDummyPage.atRefCounter);
+ if(1 == ui32Dummyref)
+ {
+ IMG_UINT8 u8Value = 0;
+ IMG_BOOL bInitPage = IMG_FALSE;
+ IMG_DEV_PHYADDR sDevPhysAddr={0};
+
+ /*Acquire the lock */
+ OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+#if defined(PVR_DUMMY_PAGE_INIT_VALUE)
+ u8Value = PVR_DUMMY_PAGE_INIT_VALUE;
+ bInitPage = IMG_TRUE;
+#else
+ bInitPage = IMG_FALSE;
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Alloc Dummy page object");
+#endif
+ /*Allocate the dummy page required for sparse backing */
+ eError = DevPhysMemAlloc(psDevNode,
+ (1 << psDevNode->sDummyPage.ui32Log2DummyPgSize),
+ u8Value,
+ bInitPage,
+#if defined(PDUMP)
+ psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ DUMMY_PAGE,
+ &psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+ &psDevNode->sDummyPage.sDummyPageHandle,
+ &sDevPhysAddr);
+ if(PVRSRV_OK != eError)
+ {
+ OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+ }
+ else
+ {
+ psDevNode->sDummyPage.ui64DummyPgPhysAddr = sDevPhysAddr.uiAddr;
+ }
+
+ /*Release the lock */
+ OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+ }
+ return eError;
+}
+
+static void DevmemIntFreeDummyPage(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_UINT32 ui32Dummyref = 0;
+
+ psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+ ui32Dummyref = OSAtomicRead(&psDevNode->sDummyPage.atRefCounter);
+
+ /* If the dummy page allocation failed due to lack of memory, the
+ * refcount can still be 0 even for a sparse allocation */
+ if(0 != ui32Dummyref)
+ {
+ OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+ /* We know there will never be 4G sparse PMRs */
+ ui32Dummyref = OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+
+ if(0 == ui32Dummyref)
+ {
+ PDUMPCOMMENT("Free Dummy page object");
+
+ /* Free the dummy page when refcount reaches zero */
+ DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+ psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+ &psDevNode->sDummyPage.sDummyPageHandle);
+
+#if defined(PDUMP)
+ psDevNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+ psDevNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+ }
+
+ OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+ }
+
+}
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+
+ if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
+ "PMR contiguity must be a multiple of the heap contiguity!",
+ __func__,
+ psReservation->psDevmemHeap->uiLog2PageSize,
+ PMR_GetLog2Contiguity(psPMR) ));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ ui32PhysicalPgOffset,
+ ui32PageCount,
+ NULL,
+ psReservation->psDevmemHeap->uiLog2PageSize);
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount)
+{
+ /*Unmap the pages and mark them invalid in the MMU PTE */
+ MMU_UnmapPages (psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ 0,
+ sDevVAddrBase,
+ ui32PageCount,
+ NULL,
+ psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING *psMapping;
+ /* number of pages (device pages) that allocation spans */
+ IMG_UINT32 ui32NumDevPages;
+ /* device virtual address of start of allocation */
+ IMG_DEV_VIRTADDR sAllocationDevVAddr;
+ /* and its length */
+ IMG_DEVMEM_SIZE_T uiAllocationSize;
+ IMG_UINT32 uiLog2Contiguity = psDevmemHeap->uiLog2PageSize;
+ IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PMR_FLAGS_T uiPMRFlags;
+
+ if (uiLog2Contiguity > PMR_GetLog2Contiguity(psPMR))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "%s: Device heap and PMR have incompatible contiguity (%u - %u). "
+ "Heap contiguity must be a multiple of the heap contiguity!",
+ __func__,
+ uiLog2Contiguity,
+ PMR_GetLog2Contiguity(psPMR) ));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+ /* allocate memory to record the mapping info */
+ psMapping = OSAllocMem(sizeof *psMapping);
+ if (psMapping == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "DevmemIntMapPMR: Alloc failed"));
+ goto e0;
+ }
+
+ uiAllocationSize = psReservation->uiLength;
+
+
+ ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2Contiguity) + 1);
+ PVR_ASSERT(ui32NumDevPages << uiLog2Contiguity == uiAllocationSize);
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ sAllocationDevVAddr = psReservation->sBase;
+
+ /* Check whether the PMR that needs to be mapped is sparse */
+ bIsSparse = PMR_IsSparse(psPMR);
+ if(bIsSparse)
+ {
+ /*Get the flags*/
+ uiPMRFlags = PMR_Flags(psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if(bNeedBacking)
+ {
+ /* Any failure is logged inside the function. If the allocation
+ * fails we must fail the map request and return an appropriate
+ * error.
+ *
+ * The dummy page is allocated after physically locking down the
+ * PMR pages, which favours the common case in which the dummy
+ * page allocation succeeds. If it fails, the physical addresses
+ * are unlocked again; the impact is slightly higher in the
+ * on-demand mode of operation. */
+ eError = DevmemIntAllocDummyPage(psDevmemHeap);
+ if(PVRSRV_OK != eError)
+ {
+ goto e3;
+ }
+ }
+
+ /* N.B. We pass mapping permission flags to MMU_MapPages and let
+ it reject the mapping if the permissions on the PMR are not compatible. */
+ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiMapFlags,
+ sAllocationDevVAddr,
+ psPMR,
+ 0,
+ ui32NumDevPages,
+ NULL,
+ uiLog2Contiguity);
+ if(PVRSRV_OK != eError)
+ {
+ goto e4;
+ }
+ }
+ else
+ {
+ eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+ sAllocationDevVAddr,
+ psPMR,
+ ui32NumDevPages << uiLog2Contiguity,
+ uiMapFlags,
+ uiLog2Contiguity);
+ if(PVRSRV_OK != eError)
+ {
+ goto e3;
+ }
+ }
+
+ psMapping->psReservation = psReservation;
+ psMapping->uiNumPages = ui32NumDevPages;
+ psMapping->psPMR = psPMR;
+ /* Don't bother with refcount on reservation, as a reservation
+ only ever holds one mapping, so we directly increment the
+ refcount on the heap instead */
+ _DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap);
+
+ *ppsMappingPtr = psMapping;
+
+ return PVRSRV_OK;
+ e4:
+ if(bNeedBacking)
+ {
+ /* If the mapping failed, the dummy page refcount taken above
+ * needs to be released */
+ DevmemIntFreeDummyPage(psDevmemHeap);
+ }
+ e3:
+ {
+ PVRSRV_ERROR eError1=PVRSRV_OK;
+ eError1 = PMRUnlockSysPhysAddresses(psPMR);
+ if(PVRSRV_OK != eError1)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Failed to unlock the physical addresses",__func__));
+ }
+ *ppsMappingPtr = NULL;
+ }
+ e2:
+ OSFreeMem(psMapping);
+
+ e0:
+ PVR_ASSERT (eError != PVRSRV_OK);
+ return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+ /* device virtual address of start of allocation */
+ IMG_DEV_VIRTADDR sAllocationDevVAddr;
+ /* number of pages (device pages) that allocation spans */
+ IMG_UINT32 ui32NumDevPages;
+ IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+ PMR_FLAGS_T uiPMRFlags;
+#if defined(SUPPORT_BUFFER_SYNC)
+ PVRSRV_DEVICE_NODE *psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+ bool bInterruptible = true;
+ unsigned long ulTimeout = MAX_SCHEDULE_TIMEOUT;
+ IMG_INT iErr;
+
+retry:
+ iErr = pvr_buffer_sync_wait(psDevNode->psBufferSyncContext,
+ psMapping->psPMR, bInterruptible, ulTimeout);
+ if (iErr)
+ {
+ if (iErr == -ERESTARTSYS)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Buffer sync wait interrupted (retrying)",
+ __FUNCTION__));
+ bInterruptible = false;
+ ulTimeout = 30 * HZ;
+ goto retry;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to unmap PMR from device (errno=%d)",
+ __FUNCTION__, iErr));
+ return PVRSRV_ERROR_STILL_MAPPED;
+ }
+#endif
+
+ ui32NumDevPages = psMapping->uiNumPages;
+ sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+ /* Check whether the PMR being unmapped is sparse */
+ bIsSparse = PMR_IsSparse(psMapping->psPMR);
+
+ if(bIsSparse)
+ {
+ /*Get the flags*/
+ uiPMRFlags = PMR_Flags(psMapping->psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if(bNeedBacking)
+ {
+ DevmemIntFreeDummyPage(psDevmemHeap);
+ }
+
+ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ 0,
+ sAllocationDevVAddr,
+ ui32NumDevPages,
+ NULL,
+ psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE);
+ }
+ else
+ {
+ MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+ sAllocationDevVAddr,
+ ui32NumDevPages,
+ psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+ }
+
+
+
+ eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Don't bother with refcount on reservation, as a reservation
+ only ever holds one mapping, so we directly decrement the
+ refcount on the heap instead */
+ _DevmemIntHeapRelease(psDevmemHeap);
+
+ OSFreeMem(psMapping);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION *psReservation;
+
+ /* allocate memory to record the reservation info */
+ psReservation = OSAllocMem(sizeof *psReservation);
+ if (psReservation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "DevmemIntReserveRange: Alloc failed"));
+ goto e0;
+ }
+
+ psReservation->sBase = sAllocationDevVAddr;
+ psReservation->uiLength = uiAllocationSize;
+
+ eError = MMU_Alloc (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiAllocationSize,
+ &uiAllocationSize,
+ 0, /* IMG_UINT32 uiProtFlags */
+ 0, /* alignment is n/a since we supply devvaddr */
+ &sAllocationDevVAddr,
+ psDevmemHeap->uiLog2PageSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ /* since we supplied the virt addr, MMU_Alloc shouldn't have
+ chosen a new one for us */
+ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+ _DevmemIntHeapAcquire(psDevmemHeap);
+
+ psReservation->psDevmemHeap = psDevmemHeap;
+ *ppsReservationPtr = psReservation;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e1:
+ OSFreeMem(psReservation);
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+ IMG_DEV_VIRTADDR sBase = psReservation->sBase;
+ IMG_UINT32 uiLength = psReservation->uiLength;
+ IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+
+ MMU_Free (psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ sBase,
+ uiLength,
+ uiLog2DataPageSize);
+
+ _DevmemIntHeapRelease(psReservation->psDevmemHeap);
+ OSFreeMem(psReservation);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(
+ DEVMEMINT_HEAP *psDevmemHeap
+ )
+{
+ if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) "
+ "which probably means allocations have been made from the heap and not freed",
+ __FUNCTION__,
+ OSAtomicRead(&psDevmemHeap->hRefCount)));
+
+ /*
+ * Try again later when you've freed all the memory
+ *
+ * Note:
+ * We don't expect the application to retry (after all this call would
+ * succeed if the client had freed all the memory which it should have
+ * done before calling this function). However, given there should be
+ * an associated handle, when the handle base is destroyed it will free
+ * any allocations leaked by the client and then it will retry this call,
+ * which should then succeed.
+ */
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1);
+
+ _DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __FUNCTION__, psDevmemHeap));
+ OSFreeMem(psDevmemHeap);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+ PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT64 sCpuVAddrBase)
+{
+ PVRSRV_ERROR eError;
+
+ IMG_UINT32 uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+
+ /*
+ * The order in which this request is processed is given below and is
+ * very important in this case:
+ *
+ * 1. The parameters are validated in PMR_ChangeSparseMem below.
+ * A successful response indicates that all the parameters are correct.
+ * On failure we bail out here without processing further.
+ * 2. On success, the PMR-specific operations are done. This includes page
+ * alloc, page free and the corresponding PMR status changes.
+ * If this call fails, the prior state of the PMR is guaranteed to be
+ * undisturbed. If it succeeds, we can go ahead with the subsequent steps.
+ * 3. Invalidate the GPU page table entries for the pages to be freed.
+ * 4. Write the GPU page table entries for the pages that got allocated.
+ * 5. Change the corresponding CPU address-space mapping.
+ *
+ * The above steps can be selectively controlled using flags.
+ */
+
+ {
+ if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
+ {
+ /* Do the PMR specific changes first */
+ eError = PMR_ChangeSparseMem(psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiSparseFlags);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to do PMR specific changes.",
+ __func__));
+ goto e0;
+ }
+
+ /* Invalidate the page table entries for the free pages.
+ * A later optimisation would be to leave untouched those that get re-mapped */
+ if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
+ {
+ PMR_FLAGS_T uiPMRFlags;
+ IMG_BOOL bNeedBacking = IMG_FALSE;
+
+ /*Get the flags*/
+ uiPMRFlags = PMR_Flags(psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
+ {
+ /* Unmap the pages and mark them invalid in the MMU PTE */
+ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiLog2PageSize,
+ bNeedBacking);
+ }
+ }
+
+ /* Wire up the page tables for the pages that got allocated */
+ if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
+ {
+ /* Map the pages and mark them Valid in the MMU PTE */
+ eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ 0,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ uiLog2PageSize);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map alloc indices.",
+ __func__));
+ goto e0;
+ }
+ }
+
+ /* Currently only used for debug */
+ if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
+ {
+ eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ 0,
+ ui32AllocPageCount,
+ pai32FreeIndices,
+ uiLog2PageSize);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map Free indices.",
+ __func__));
+ goto e0;
+ }
+ }
+ }
+
+ }
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ /* Apply the sparse changes to the CPU virtual mapping as well */
+ if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
+ {
+ if (sCpuVAddrBase != 0)
+ {
+ eError = PMR_ChangeSparseMemCPUMap(psPMR,
+ sCpuVAddrBase,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map to CPU addr space.",
+ __func__));
+ goto e0;
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntCtxDestroy
+@Description Destroys a device memory context created by DevmemIntCtxCreate.
+@Input psDevmemCtx Device Memory context
+@Return Cannot fail.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(
+ DEVMEMINT_CTX *psDevmemCtx
+ )
+{
+ /*
+ We can't determine whether we should be freeing the context here,
+ as refcount != 1 could be due either to heap(s) remaining with
+ allocations on them, or to this memory context having been
+ exported.
+ As the client couldn't do anything useful with this information
+ anyway, and the refcount ensures we only free the context when
+ _all_ references have been released, don't bother checking and
+ just return OK regardless.
+ */
+ _DevmemIntCtxRelease(psDevmemCtx);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr)
+{
+ IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
+ DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
+ DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray;
+
+ IMG_BOOL bFound = IMG_FALSE;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ for (i = 0;
+ i < psDinfo->uiNumHeapConfigs && !bFound;
+ i++)
+ {
+ for (j = 0;
+ j < psConfig[i].uiNumHeaps && !bFound;
+ j++)
+ {
+ IMG_DEV_VIRTADDR uiBase =
+ psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr;
+ IMG_DEVMEM_SIZE_T uiSize =
+ psConfig[i].psHeapBlueprintArray[j].uiHeapLength;
+
+ if ( (sDevAddr.uiAddr >= uiBase.uiAddr) &&
+ (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize)))
+ {
+ uiLog2HeapPageSize =
+ psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize;
+ bFound = IMG_TRUE;
+ }
+ }
+ }
+
+ if (uiLog2HeapPageSize == 0)
+ {
+ return PVRSRV_ERROR_INVALID_GPU_ADDR;
+ }
+
+ return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+ uiLog2HeapPageSize,
+ sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
+
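+/* Returns the head of the driver-wide list of exported contexts. The list
+ * head lives in a function-local static so it is initialised exactly once
+ * and is shared by DevmemIntExportCtx, DevmemIntUnexportCtx and
+ * DevmemIntAcquireRemoteCtx below. */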
+static void _DevmemIntExportCtxGetList(PDLLIST_NODE *ppsListHead)
+{
+ static DECLARE_DLLIST(sListHead);
+
+ *ppsListHead = &sListHead;
+}
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+ PMR *psPMR,
+ DEVMEMINT_CTX_EXPORT **ppsContextExport)
+{
+ PDLLIST_NODE psListHead;
+ DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+ _DevmemIntCtxAcquire(psContext);
+ PMRRefPMR(psPMR);
+
+ _DevmemIntExportCtxGetList(&psListHead);
+
+ psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
+ if (psCtxExport == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to export context. System currently out of memory",
+ __func__));
+ /* Drop the references taken above before bailing out */
+ PMRUnrefPMR(psPMR);
+ _DevmemIntCtxRelease(psContext);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psCtxExport->psDevmemCtx = psContext;
+ psCtxExport->psPMR = psPMR;
+ dllist_add_to_tail(psListHead, &psCtxExport->sNode);
+
+ *ppsContextExport = psCtxExport;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
+{
+ PDLLIST_NODE psListHead;
+
+ _DevmemIntExportCtxGetList(&psListHead);
+
+ PMRUnrefPMR(psContextExport->psPMR);
+ _DevmemIntCtxRelease(psContextExport->psDevmemCtx);
+ dllist_remove_node(&psContextExport->sNode);
+ OSFreeMem(psContextExport);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+ DEVMEMINT_CTX **ppsContext,
+ IMG_HANDLE *phPrivData)
+{
+
+ PDLLIST_NODE psListHead;
+ PDLLIST_NODE psListNode, psListNodeNext;
+ DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+ _DevmemIntExportCtxGetList(&psListHead);
+
+ /* Find context from list using PMR as key */
+ dllist_foreach_node(psListHead, psListNode, psListNodeNext)
+ {
+ psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
+ if (psCtxExport->psPMR == psPMR)
+ {
+ _DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
+ *ppsContext = psCtxExport->psDevmemCtx;
+ *phPrivData = psCtxExport->psDevmemCtx->hPrivData;
+ return PVRSRV_OK;
+ }
+ }
+
+ /* Unable to find exported context, return error */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire remote context. Could not retrieve context with given PMR",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntRegisterPFNotify
+@Description Registers a PID to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Input ui32PID The PID of the process that would like to be
+ notified.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_INT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ DLLIST_NODE *psNode, *psNodeNext;
+ DEVMEMINT_PF_NOTIFY *psNotifyNode;
+ IMG_BOOL bPresent = IMG_FALSE;
+
+ if (psDevmemCtx == NULL)
+ {
+ PVR_ASSERT(!"Devmem Context Missing");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevNode = psDevmemCtx->psDevNode;
+
+ if (bRegister)
+ {
+ /* If this is the first PID in the list, the device memory context
+ * needs to be registered for notification */
+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+ {
+ dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead,
+ &psDevmemCtx->sPageFaultNotifyListElem);
+ }
+ }
+
+ /* Loop through the registered PIDs and check whether this one is
+ * present */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+ if (psNotifyNode->ui32PID == ui32PID)
+ {
+ bPresent = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (bRegister == IMG_TRUE)
+ {
+ if (bPresent)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to register a PID that is already registered",
+ __func__));
+ return PVRSRV_ERROR_PID_ALREADY_REGISTERED;
+ }
+
+ psNotifyNode = OSAllocMem(sizeof(*psNotifyNode));
+ if (psNotifyNode == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to allocate memory for the notify list",
+ __func__));
+ /* Undo the list registration done above if this was the first PID */
+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+ {
+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+ }
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psNotifyNode->ui32PID = ui32PID;
+ dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem));
+ }
+ else
+ {
+ if (!bPresent)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to unregister a PID that is not registered",
+ __func__));
+ return PVRSRV_ERROR_PID_NOT_REGISTERED;
+ }
+ dllist_remove_node(psNode);
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+ OSFreeMem(psNotifyNode);
+ }
+
+ if (!bRegister)
+ {
+ /* If the last process in the list is being unregistered, then also
+ * unregister the device memory context from the notify list. */
+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+ {
+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntPFNotify
+@Description Notifies any processes that have registered themselves to be
+ notified when a page fault happens on a specific device memory
+ context.
+@Input *psDevNode The device node.
+@Input ui64FaultedPCAddress The page catalogue address that faulted.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT64 ui64FaultedPCAddress)
+{
+ DLLIST_NODE *psNode, *psNodeNext;
+ DEVMEMINT_PF_NOTIFY *psNotifyNode;
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtx = NULL;
+ IMG_BOOL bFailed = IMG_FALSE;
+
+ if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead)))
+ {
+ return PVRSRV_OK;
+ }
+
+ dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext)
+ {
+ DEVMEMINT_CTX *psThisContext =
+ IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem);
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to Acquire Base Address (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress)
+ {
+ psDevmemCtx = psThisContext;
+ break;
+ }
+ }
+
+ if (psDevmemCtx == NULL)
+ {
+ /* Not found, just return */
+ return PVRSRV_OK;
+ }
+
+ /* Loop through each registered PID and send a signal to the process */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+ eError = OSDebugSignalPID(psNotifyNode->ui32PID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to signal process for PID: %u",
+ __func__,
+ psNotifyNode->ui32PID));
+
+ PVR_ASSERT(!"Unable to signal process");
+
+ bFailed = IMG_TRUE;
+ }
+ }
+
+ if (bFailed)
+ {
+ return PVRSRV_ERROR_SIGNAL_FAILED;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+ IMG_UINT32 ui32MMUContextID;
+ MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID);
+ return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiPDumpMMUCtx;
+
+
+ PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
+
+ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+ &uiPDumpMMUCtx);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+ The following SYSMEM refers to the 'MMU Context', so arguably it
+ should be the MMU context, not the PMR, that determines the PDump
+ MemSpace tag.
+ From a PDump point of view it doesn't matter which name space we use,
+ as long as that MemSpace is used on the 'MMU Context' we're dumping from.
+ */
+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+ uiPDumpMMUCtx,
+ sDevAddrStart,
+ uiSize,
+ pszFilename,
+ ui32FileOffset,
+ ui32PDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_UINT32 ui32ContextID;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemIntPDumpBitmap: Failed to acquire MMU context"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID;
+ }
+
+ eError = PDumpBitmapKM(psDeviceNode,
+ pszFileName,
+ ui32FileOffset,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ sDevBaseAddr,
+ ui32ContextID,
+ ui32Size,
+ ePixelFormat,
+ ui32AddrMode,
+ ui32PDumpFlags);
+
+ /* Don't care about return value */
+ MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+ return eError;
+}
+
+
+#endif
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header file for server side component of device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_H__
+#define __DEVICEMEM_SERVER_H__
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "connection_server.h"
+
+#include "pmr.h"
+
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
+
+
+/**************************************************************************/ /*!
+@Function DevmemIntUnpin
+@Description This is the counterpart to DevmemIntPin(). It is meant to be
+ called when the allocation is NOT mapped in the device virtual
+ space.
+
+@Input psPMR The physical memory to unpin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntUnpinInvalidate
+@Description This is the counterpart to DevmemIntPinValidate(). It is meant
+ to be called for allocations that ARE mapped in the device
+ virtual space, where the mapping has to be invalidated.
+
+@Input psPMR The physical memory to unpin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntPin
+@Description This is the counterpart to DevmemIntUnpin().
+ It is meant to be called if there is NO device mapping present.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntPinValidate
+@Description This is the counterpart to DevmemIntUnpinInvalidate().
+ It is meant to be called if there IS a device mapping present
+ that needs to be taken care of.
+
+@Input psDevmemMapping The mapping structure used for the passed PMR.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For given exportable memory descriptor returns PMR handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For given reservation returns the Heap handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+ IMG_HANDLE *phHeap);
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context. This is usually the
+ * counterpart of the client side memory context, and indeed is
+ * usually created at the same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
+ * to later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work
+ * for creating the device memory context, but it does not guarantee
+ * that a page catalogue will have been created, as this may be
+ * deferred until first allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ /* devnode / perproc etc */
+ IMG_BOOL bKernelMemoryCtx,
+ DEVMEMINT_CTX **ppsDevmemCtxPtr,
+ IMG_HANDLE *hPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize);
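+
+/* Illustrative sketch only (not called anywhere in this patch): the expected
+ * create/destroy pairing for a server-side memory context. The variable
+ * names and IMG_FALSE for bKernelMemoryCtx are assumptions for the example.
+ *
+ * DEVMEMINT_CTX *psDevmemCtx;
+ * IMG_HANDLE hPrivData;
+ * IMG_UINT32 ui32CPUCacheLineSize;
+ *
+ * eError = DevmemIntCtxCreate(psConnection, psDeviceNode, IMG_FALSE,
+ * &psDevmemCtx, &hPrivData,
+ * &ui32CPUCacheLineSize);
+ * ... create heaps, reserve ranges, map PMRs ...
+ * eError = DevmemIntCtxDestroy(psDevmemCtx); (all heaps destroyed first)
+ */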
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxDestroy(
+ DEVMEMINT_CTX *psDevmemCtx
+ );
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context. This will cause
+ * a call into the MMU code to allocate various data structures for
+ * managing this heap. It will not necessarily cause any page tables
+ * to be set up, as this can be deferred until first allocation.
+ * (i.e. we shouldn't care - it's up to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2). The
+ * data page size as specified here will be communicated to the mmu
+ * module, and thus may determine the page size configured in page
+ * directory entries for subsequent allocations from this heap. It is
+ * essential that the page size here is less than or equal to the
+ * "minimum contiguity guarantee" of any PMR that you subsequently
+ * attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are
+ * promising that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapCreate(
+ DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 uiLog2DataPageSize,
+ DEVMEMINT_HEAP **ppsDevmemHeapPtr
+ );
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from this heap must have been freed before this
+ * call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapDestroy(
+ DEVMEMINT_HEAP *psDevmemHeap
+ );
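+
+/* Illustrative sketch only: creating a heap with 4KB data pages
+ * (uiLog2DataPageSize = 12) inside an existing context. The base address,
+ * length and variable names are assumptions for the example.
+ *
+ * DEVMEMINT_HEAP *psHeap;
+ *
+ * eError = DevmemIntHeapCreate(psDevmemCtx, sHeapBaseAddr, uiHeapLength,
+ * 12, &psHeap);
+ * ... reserve / map / unmap / unreserve ...
+ * eError = DevmemIntHeapDestroy(psHeap); (all allocations freed first)
+ */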
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing
+ * committed, as this call will call into the MMU code to set up the
+ * page tables for this allocation, which shall in turn request the
+ * physical addresses from the PMR. Alternatively, the PMR
+ * implementation can choose to do so off the back of the "lock"
+ * callback, which it will receive as a result (indirectly) of this
+ * call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can
+ * be called, and these would be "inherited" from the implementation
+ * of the PMR. For example if the PMR "lock" callback causes pages to
+ * be pinned at that time (which may cause scheduling or disk I/O
+ * etc.) then it would not be legal to "Map" the PMR in a context
+ * where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnmapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/* DevmemIntMapPages()
+ *
+ * Maps an arbitrary amount of pages from a PMR to a reserved range
+ *
+ * @input psReservation Reservation handle for the range
+ * @input psPMR PMR that is mapped
+ * @input ui32PageCount Number of consecutive pages that are mapped
+ * @input ui32PhysicalPgOffset Logical page offset into the PMR
+ * @input uiFlags Mapping flags
+ * @input sDevVAddrBase Virtual address base to start the mapping from
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+/* DevmemIntUnmapPages()
+ *
+ * Unmaps an arbitrary amount of pages from a reserved range
+ *
+ * @input psReservation Reservation handle for the range
+ * @input sDevVAddrBase Virtual address base to start from
+ * @input ui32PageCount Number of consecutive pages that are unmapped
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount);
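+
+/* Illustrative sketch only: map four pages starting at PMR page offset 0
+ * into a previously reserved range and unmap them again. The values shown
+ * are assumptions for the example.
+ *
+ * eError = DevmemIntMapPages(psReservation, psPMR, 4, 0, uiFlags,
+ * sDevVAddrBase);
+ * ...
+ * eError = DevmemIntUnmapPages(psReservation, sDevVAddrBase, 4);
+ */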
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you
+ * are promising that you shall later call DevmemIntUnreserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
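+
+/* Illustrative sketch only: the reserve -> map -> unmap -> unreserve
+ * lifecycle promised by the comments above. Variable names are assumptions
+ * for the example.
+ *
+ * DEVMEMINT_RESERVATION *psReservation;
+ * DEVMEMINT_MAPPING *psMapping;
+ *
+ * eError = DevmemIntReserveRange(psHeap, sDevVAddr, uiSize, &psReservation);
+ * eError = DevmemIntMapPMR(psHeap, psReservation, psPMR, uiMapFlags,
+ * &psMapping);
+ * ...
+ * eError = DevmemIntUnmapPMR(psMapping);
+ * eError = DevmemIntUnreserveRange(psReservation);
+ */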
+
+/*************************************************************************/ /*!
+@Function DevmemIntChangeSparse
+@Description Changes the sparse allocations of a PMR by allocating and freeing
+ pages and changing their corresponding CPU and GPU mappings.
+
+@Input psDevmemHeap Pointer to the heap we map on
+@Input psPMR The PMR we want to map
+@Input ui32AllocPageCount Number of pages to allocate
+@Input pai32AllocIndices The logical PMR indices where pages will
+ be allocated. May be NULL.
+@Input ui32FreePageCount Number of pages to free
+@Input pai32FreeIndices The logical PMR indices where pages will
+ be freed. May be NULL.
+@Input uiSparseFlags Flags passed in to determine which kind
+ of sparse change the user wanted.
+ See devicemem_typedefs.h for details.
+@Input uiFlags The memalloc flags for this virtual range.
+@Input sDevVAddrBase The base address of the virtual range of
+ this sparse allocation.
+@Input sCpuVAddrBase The CPU base address of this allocation.
+ May be 0 if it does not exist.
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+ PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT64 sCpuVAddrBase);
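+
+/* Illustrative sketch only: growing a sparse allocation by two pages and
+ * reflecting the change in an existing CPU mapping. The index values are
+ * assumptions for the example.
+ *
+ * IMG_UINT32 aui32Alloc[] = { 4, 5 };
+ *
+ * eError = DevmemIntChangeSparse(psHeap, psPMR,
+ * 2, aui32Alloc,
+ * 0, NULL,
+ * SPARSE_RESIZE_ALLOC | SPARSE_MAP_CPU_ADDR,
+ * uiFlags, sDevVAddrBase, sCpuVAddrBase);
+ */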
+
+extern PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr);
+
+/*************************************************************************/ /*!
+@Function DevmemIntRegisterPFNotify
+@Description Registers a PID to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Input ui32PID The PID of the process that would like to be
+ notified.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_INT32 ui32PID,
+ IMG_BOOL bRegister);
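+
+/* Illustrative sketch only: register the given PID for page-fault
+ * notification on a context and later remove it again.
+ *
+ * eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32PID, IMG_TRUE);
+ * ...
+ * eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32PID, IMG_FALSE);
+ */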
+
+/*************************************************************************/ /*!
+@Function DevmemIntPFNotify
+@Description Notifies any processes that have registered themselves to be
+ notified when a page fault happens on a specific device memory
+ context.
+@Input *psDevNode The device node.
+@Input ui64FaultedPCAddress The page catalogue address that faulted.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT64 ui64FaultedPCAddress);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+/* FIXME: uiArraySize shouldn't be here, and is an
+ artefact of the bridging */
+extern PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+extern IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpSaveToFileVirtual)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+ PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpBitmap)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Width);
+ PVR_UNREFERENCED_PARAMETER(ui32Height);
+ PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+ PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+ PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+ PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+ PMR *psPMR,
+ DEVMEMINT_CTX_EXPORT **ppsContextExport);
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+ DEVMEMINT_CTX **ppsContext,
+ IMG_HANDLE *phPrivData);
+
+#endif /* ifndef __DEVICEMEM_SERVER_H__ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header file for utilities that are specific to device memory functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE IMG_UINT32 DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+ IMG_UINT32 ui32Ret;
+
+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+ switch (ui32CPUCacheMode)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+
+ /*
+ * If the system has no coherency but coherency has been requested for
+ * both CPU and GPU, we currently have to fall back to uncached.
+ *
+ * Ideally this case should return an error, but as long as many services
+ * allocations use both CPU/GPU coherency flags and rely on the UNCACHED
+ * fallback we have to leave it here.
+ */
+ if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) &&
+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ }
+ else
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+ }
+
+ break;
+
+ default:
+ PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+ PVR_ASSERT(0);
+ /*
+ We should never get here, but if we do then setting the mode
+ to uncached is the safest thing to do.
+ */
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ break;
+ }
+
+ return ui32Ret;
+}
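+
+/* Example of the fallback above (illustrative): requesting
+ * PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT together with
+ * PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT on a system that does not snoop
+ * in both directions makes DevmemCPUCacheMode() return
+ * PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; on a fully snooping system it returns
+ * PVRSRV_MEMALLOCFLAG_CPU_CACHED. */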
+
+static INLINE IMG_UINT32 DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+ IMG_UINT32 ui32Ret;
+
+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+ switch (ui32DeviceCacheMode)
+ {
+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+
+ /*
+ * If the system has no coherency but coherency has been requested for
+ * both CPU and GPU, we currently have to fall back to uncached.
+ *
+ * Ideally this case should return an error, but as long as many services
+ * allocations use both CPU/GPU coherency flags and rely on the UNCACHED
+ * fallback we have to leave it here.
+ */
+ if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) &&
+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ }
+ else
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+ }
+
+ break;
+
+ default:
+ PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+ PVR_ASSERT(0);
+ /*
+ We should never get here, but if we do then setting the mode
+ to uncached is the safest thing to do.
+ */
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ break;
+ }
+
+ return ui32Ret;
+}
+
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+ IMG_BOOL bRet = IMG_FALSE;
+
+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+ if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+ {
+ bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig);
+ }
+ return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+ IMG_BOOL bRet = IMG_FALSE;
+
+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+ if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+ {
+ bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig);
+ }
+ return bRet;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of device memory management -- this file
+ is forked from new_devmem_allocation.h as this one has to
+ reside in the top level include so that client code is able
+ to make use of the typedefs.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include <powervr/mem_types.h>
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct _DEVMEM_CONTEXT_ DEVMEM_CONTEXT; /*!< Convenience typedef for struct _DEVMEM_CONTEXT_ */
+typedef struct _DEVMEM_HEAP_ DEVMEM_HEAP; /*!< Convenience typedef for struct _DEVMEM_HEAP_ */
+typedef struct _DEVMEM_MEMDESC_ DEVMEM_MEMDESC; /*!< Convenience typedef for struct _DEVMEM_MEMDESC_ */
+typedef struct _DEVMEM_PAGELIST_ DEVMEM_PAGELIST; /*!< Convenience typedef for struct _DEVMEM_PAGELIST_ */
+typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T; /*!< Convenience typedef for PVRSRV_MEMALLOCFLAGS_T */
+
+typedef IMG_HANDLE /* FIXME: should be a SID */ DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */
+
+typedef struct _DEVMEMX_PHYS_MEMDESC_ DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */
+typedef struct _DEVMEMX_VIRT_MEMDESC_ DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */
+
+/*! calling code needs all the info in this struct, to be able to pass it around */
+typedef struct
+{
+ /*! A handle to the PMR. Should be a SID. FIXME: decide whether
+ this is right... as the PMR would have to be a cross-process
+ handle */
+ IMG_HANDLE hPMRExportHandle;
+ /*! The "key" to prove we have authorization to use this PMR */
+ IMG_UINT64 uiPMRExportPassword;
+ /*! Size and alignment properties for this PMR. Note, these
+ numbers are not trusted in kernel, but we need to cache them
+ client-side in order to allocate from the VM arena. The kernel
+ will know the actual alignment and size of the PMR and thus
+ would prevent client code from breaching security here. Ditto
+ for physmem granularity (aka page size) if this is different
+ from alignment */
+ IMG_DEVMEM_SIZE_T uiSize;
+ /*! We call this "contiguity guarantee" to be more precise than
+ calling it "alignment" or "page size", terms which may seem
+ similar but have different emphasis. The number reported here
+ is the minimum contiguity guarantee from the creator of the
+ PMR. Now, there is no requirement to allocate that coarsely
+ from the RA. The alignment given to the RA simply needs to be
+ at least as coarse as the device page size for the heap we
+ ultimately intend to map into. What is important is that the
+ device MMU data page size is not greater than the minimum
+ contiguity guarantee from the PMR. This value is reported to
+ the client in order that it can choose to make early checks and
+ perhaps decide which heap (in a variable page size scenario) it
+ would be safe to map this PMR into. For convenience, the
+ client may choose to use this argument as the alignment of the
+ virtual range it chooses to allocate, but this is _not_
+ necessary and in many cases it could get away with a
+ finer alignment, should the heap into which this PMR will be
+ mapped support it. */
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
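+
+/* Example (illustrative): a PMR created with uiLog2ContiguityGuarantee = 12
+ * guarantees at least 4KB of physical contiguity, so it can safely be mapped
+ * into any heap whose device MMU data page size is 4KB or smaller, but not
+ * into a heap configured for, say, 16KB data pages. */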
+
+/* Enum that describes the operation associated with changing sparse memory */
+typedef enum Resize {
+ SPARSE_RESIZE_NONE = 0,
+
+ /* This should be set to indicate the change needs allocation */
+ SPARSE_RESIZE_ALLOC = 1,
+
+ /* This should be set to indicate the change needs free */
+ SPARSE_RESIZE_FREE = 2,
+
+ SPARSE_RESIZE_BOTH = (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE),
+
+ /* This should be set to silently swap underlying physical memory
+ * without disturbing its device or cpu virtual maps
+ * This flag is not supported in the case of PDUMP and could lead to
+ * PDUMP panic when used */
+ SPARSE_REMAP_MEM = 4,
+
+ /* Should be set to make the sparse changes appear in the CPU virtual map */
+ SPARSE_MAP_CPU_ADDR = 8
+} SPARSE_MEM_RESIZE_FLAGS;
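+
+/* Example (illustrative): a grow-and-shrink operation that should also be
+ * reflected in the CPU view would pass
+ * (SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR)
+ * as the SPARSE_MEM_RESIZE_FLAGS argument to the sparse-change call. */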
+
+/* To use with DevmemSubAllocate() as the default factor if no
+ * over-allocation is desired. */
+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER 1
+
+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management internal utility functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally by device memory management
+ code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+
+/*
+ SVM heap management support functions for CPU (un)mapping
+*/
+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2
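+
+/* Two code-paths follow: the kernel-managed path simply locks down whatever
+ * CPU address the kernel mmap returned, while the user-managed fall-back path
+ * asks the RA for a device-compatible virtual address and retries a small,
+ * bounded number of times (governed by DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+ * if that address is already in use by the process. */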
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 *ui64MapAddress)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 ui64SvmMapAddr;
+ IMG_UINT64 ui64SvmMapAddrEnd;
+ IMG_UINT64 ui64SvmHeapAddrEnd;
+
+ /* SVM heap management is always XXX_KERNEL_MANAGED unless we
+ have triggered the fall back code-path in which case we
+ should not be calling into this code-path */
+ PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED);
+
+ /* By acquiring the CPU virtual address here, it essentially
+ means we lock-down the virtual address for the duration
+ of the life-cycle of the allocation until a de-allocation
+ request comes in. Thus the allocation is guaranteed not to
+ change its virtual address on the CPU during its life-time.
+ NOTE: Import might have already been CPU Mapped before now,
+ normally this is not a problem, see fall back */
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to CPU map (lock-down) device memory for SVM use",
+ __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+ goto failSVM;
+ }
+
+ /* Supplied kernel mmap virtual address is also device virtual address;
+ calculate the heap & kernel supplied mmap virtual address limits */
+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+ ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+ ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize;
+ PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0);
+
+ /* SVM limit test may fail if processor has more virtual address bits than device */
+ if (ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd)
+ {
+ /* Unmap incompatible SVM virtual address, this
+ may not release address if it was elsewhere
+ CPU Mapped before call into this function */
+ _DevmemImportStructCPUUnmap(psImport);
+
+ /* Flag incompatible SVM mapping */
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto failSVM;
+ }
+
+ *ui64MapAddress = ui64SvmMapAddr;
+failSVM:
+ /* either OK, MAP_FAILED or BAD_MAPPING */
+ return eError;
+}
+
+static inline void
+_DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ PVR_UNREFERENCED_PARAMETER(psHeap);
+ _DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT uiAlign,
+ IMG_UINT64 *ui64MapAddress)
+{
+ RA_LENGTH_T uiAllocatedSize;
+ RA_BASE_T uiAllocatedAddr;
+ IMG_UINT64 ui64SvmMapAddr;
+ IMG_UINT uiRetry = 0;
+ PVRSRV_ERROR eError;
+
+ /* If SVM heap management has transitioned to XXX_USER_MANAGED,
+ this is essentially a fall back approach that ensures we
+ continue to satisfy SVM alloc. This approach is not without
+ hazards in that we may specify a virtual address that is
+ already in use by the user process */
+ PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED);
+
+ /* Normally, for SVM heap allocations, CPUMap _must_ be done
+ before DevMap; ideally the initial CPUMap should be done by
+ SVM functions though this is not a hard requirement as long
+ as the prior elsewhere obtained CPUMap virtual address meets
+ SVM address requirements. This is a fall-back code-pathway
+ so we have to test that this assumption holds before we
+ progress any further */
+ OSLockAcquire(psImport->sCPUImport.hLock);
+
+ if (psImport->sCPUImport.ui32RefCount)
+ {
+ /* Already CPU Mapped SVM heap allocation, this prior elsewhere
+ obtained virtual address is responsible for the above
+ XXX_KERNEL_MANAGED failure. As we are not responsible for
+ this, we cannot progress any further so need to fail */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Previously obtained CPU map address not SVM compatible"
+ , __func__));
+
+ /* Revert SVM heap to DEVMEM_HEAP_TYPE_KERNEL_MANAGED */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Reverting SVM heap back to kernel managed",
+ __func__));
+
+ OSLockRelease(psImport->sCPUImport.hLock);
+
+ /* Do we need a more specific error code here? */
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failSVM;
+ }
+
+ OSLockRelease(psImport->sCPUImport.hLock);
+
+ do
+ {
+ /* Next we proceed to instruct the kernel to use the RA_Alloc supplied
+ virtual address to map-in this SVM import suballocation; there is no
+ guarantee that this RA_Alloc virtual address may not collide with an
+ already in-use VMA range in the process */
+ eError = RA_Alloc(psHeap->psQuantizedVMRA,
+ psImport->uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* flags: this RA doesn't use flags*/
+ uiAlign,
+ "SVM_Virtual_Alloc",
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ NULL /* don't care about per-import priv data */);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot RA allocate SVM compatible address",
+ __func__));
+ goto failSVM;
+ }
+
+ /* No reason for allocated virtual size to be different from
+ the PMR's size */
+ psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr;
+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+ /* Map the import or allocation using the RA_Alloc virtual address;
+ the kernel may fail the request if the supplied virtual address
+ is already in-use in which case we re-try using another virtual
+ address obtained from the RA_Alloc */
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ /* For now we simply discard a failed RA_Alloc() obtained virtual
+ address (i.e. plenty of virtual space); this prevents us from
+ re-using it and essentially blacklists the address from future
+ SVM consideration. We abandon the fall-back attempt if the retry
+ count exceeds the fall-back retry limit */
+ if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot find SVM compatible address, bad mapping",
+ __func__));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto failSVM;
+ }
+ }
+ else
+ {
+ /* Found compatible SVM virtual address, set as device virtual address */
+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+ }
+ } while (eError != PVRSRV_OK);
+
+ *ui64MapAddress = ui64SvmMapAddr;
+failSVM:
+ return eError;
+}
+
+static inline void
+_DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ RA_BASE_T uiAllocatedAddr;
+
+ /* We only free SVM compatible addresses, all addresses in
+ the blacklist are essentially excluded from future RA_Alloc */
+ uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
+
+ _DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+_DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT uiAlign,
+ IMG_UINT64 *ui64MapAddress)
+{
+ PVRSRV_ERROR eError;
+
+ switch(psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ eError = _DevmemCPUMapSVMKernelManaged(psHeap,
+ psImport,
+ ui64MapAddress);
+ if (eError == PVRSRV_ERROR_BAD_MAPPING)
+ {
+ /* If the SVM map address is outside of SVM heap limits,
+ change heap type to DEVMEM_HEAP_TYPE_USER_MANAGED */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Kernel managed SVM heap is now user managed",
+ __func__));
+
+ /* Retry using user managed fall-back approach */
+ eError = _DevmemCPUMapSVMUserManaged(psHeap,
+ psImport,
+ uiAlign,
+ ui64MapAddress);
+ }
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ eError = _DevmemCPUMapSVMUserManaged(psHeap,
+ psImport,
+ uiAlign,
+ ui64MapAddress);
+ break;
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ break;
+ }
+
+ return eError;
+}
+
+static inline void
+_DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ switch(psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ _DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ _DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ The Devmem import structure is the structure we use
+ to manage memory that is "imported" (which is page
+ granular) from the server into our process, this
+ includes allocations.
+
+ This allows memory to be imported without requiring
+ any CPU or device mapping. Memory can then be mapped
+ into the device or CPU on demand, but neither is
+ required.
+*/
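+
+/* Reference counting sketch for the import structure (based on the helpers
+ * below): _DevmemImportStructAcquire() and _DevmemImportStructRelease() must
+ * be balanced; when the count drops to zero the release path unrefs the PMR,
+ * destroys the locks and frees the import, while _DevmemImportDiscard() is
+ * only for imports whose count is still zero. */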
+
+IMG_INTERNAL
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+ PVR_ASSERT(iRefCount != 1);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ iRefCount-1,
+ iRefCount);
+}
+
+IMG_INTERNAL
+void _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+ IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+ PVR_ASSERT(iRefCount >= 0);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ iRefCount+1,
+ iRefCount);
+
+ if (iRefCount == 0)
+ {
+ BridgePMRUnrefPMR(psImport->hDevConnection,
+ psImport->hPMR);
+ OSLockDestroy(psImport->sCPUImport.hLock);
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+ OSLockDestroy(psImport->hLock);
+#if defined(PDUMP)
+ OSFreeMem(psImport->pszAnnotation);
+#endif
+ OSFreeMem(psImport);
+ }
+}
+
+IMG_INTERNAL
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+ PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+ OSLockDestroy(psImport->sCPUImport.hLock);
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+ OSLockDestroy(psImport->hLock);
+ OSFreeMem(psImport);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+ DEVMEM_MEMDESC *psMemDesc;
+ PVRSRV_ERROR eError;
+
+ psMemDesc = OSAllocMem(sizeof(DEVMEM_MEMDESC));
+
+ if (psMemDesc == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failAlloc;
+ }
+
+ /* Structure must be zeroed in case it needs to be freed before it is initialised! */
+ OSCachedMemSet(psMemDesc, 0, sizeof(DEVMEM_MEMDESC));
+
+ eError = OSLockCreate(&psMemDesc->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMDLock;
+ }
+
+ eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDMDLock;
+ }
+
+ eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failCMDLock;
+ }
+
+ *ppsMemDesc = psMemDesc;
+
+ return PVRSRV_OK;
+
+failCMDLock:
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+failDMDLock:
+ OSLockDestroy(psMemDesc->hLock);
+failMDLock:
+ OSFreeMem(psMemDesc);
+failAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ Init the MemDesc structure
+*/
+IMG_INTERNAL
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize)
+{
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ 0,
+ 1);
+
+ psMemDesc->psImport = psImport;
+ psMemDesc->uiOffset = uiOffset;
+
+ psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+ psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+ psMemDesc->uiAllocSize = uiSize;
+ psMemDesc->hPrivData = NULL;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ psMemDesc->sTraceData.ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+#endif
+
+ OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
+
+IMG_INTERNAL
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+ IMG_INT iRefCount = 0;
+
+ iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ iRefCount-1,
+ iRefCount);
+}
+
+IMG_INTERNAL
+void _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+ IMG_INT iRefCount;
+ PVR_ASSERT(psMemDesc != NULL);
+
+ iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+ PVR_ASSERT(iRefCount >= 0);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ iRefCount+1,
+ iRefCount);
+
+ if (iRefCount == 0)
+ {
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE)
+ {
+ /* As soon as the first sub-allocation on the psImport is freed
+ * we might get dirty memory when reusing it.
+		 * We have to clear the ZEROED & CLEAN flags */
+
+ psMemDesc->psImport->uiProperties &= ~DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+ psMemDesc->psImport->uiProperties &= ~DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+ RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+ psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+ psMemDesc->uiOffset);
+ }
+ else
+ {
+ _DevmemImportStructRelease(psMemDesc->psImport);
+ }
+
+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+ OSLockDestroy(psMemDesc->hLock);
+ OSFreeMem(psMemDesc);
+ }
+}
+
+IMG_INTERNAL
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+ OSLockDestroy(psMemDesc->hLock);
+ OSFreeMem(psMemDesc);
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T *puiFlags)
+{
+ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+ (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
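+	/* For a power of two exactly one bit is set, so uiAlign & (uiAlign - 1)
+	   clears that bit and yields zero; any other non-zero value keeps at
+	   least one bit set. */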
+ if (uiAlign & (uiAlign-1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: The requested alignment is not a power of two.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (uiSize == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Please request a non-zero size value.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* If zero flag is set we have to have write access to the page. */
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags))
+ {
+ (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ Allocate and init an import structure
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_IMPORT **ppsImport)
+{
+ DEVMEM_IMPORT *psImport;
+ PVRSRV_ERROR eError;
+
+ psImport = OSAllocMem(sizeof *psImport);
+ if (psImport == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if defined (PDUMP)
+ /* Make sure this points nowhere as long as we don't need it */
+ psImport->pszAnnotation = NULL;
+#endif
+
+ /* Setup some known bad values for things we don't have yet */
+ psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+ psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+ psImport->sDeviceImport.psHeap = NULL;
+ psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+ eError = OSLockCreate(&psImport->sDeviceImport.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDIOSLockCreate;
+ }
+
+ psImport->sCPUImport.hOSMMapData = NULL;
+ psImport->sCPUImport.pvCPUVAddr = NULL;
+
+ eError = OSLockCreate(&psImport->sCPUImport.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failCIOSLockCreate;
+ }
+
+ /* Set up common elements */
+ psImport->hDevConnection = hDevConnection;
+
+ /* Setup properties */
+ psImport->uiProperties = 0;
+
+ /* Setup refcounts */
+ psImport->sDeviceImport.ui32RefCount = 0;
+ psImport->sCPUImport.ui32RefCount = 0;
+ OSAtomicWrite(&psImport->hRefCount, 0);
+
+ /* Create the lock */
+ eError = OSLockCreate(&psImport->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failILockAlloc;
+ }
+
+ *ppsImport = psImport;
+
+ return PVRSRV_OK;
+
+failILockAlloc:
+ OSLockDestroy(psImport->sCPUImport.hLock);
+failCIOSLockCreate:
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+failDIOSLockCreate:
+ OSFreeMem(psImport);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ Initialise the import structure
+*/
+IMG_INTERNAL
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_HANDLE hPMR,
+ DEVMEM_PROPERTIES_T uiProperties)
+{
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ 0,
+ 1);
+
+ psImport->uiSize = uiSize;
+ psImport->uiAlign = uiAlign;
+ psImport->uiFlags = uiFlags;
+ psImport->hPMR = hPMR;
+ psImport->uiProperties = uiProperties;
+ OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/*
+ Map an import to the device
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+ IMG_BOOL bMap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 ui64OptionalMapAddress)
+{
+ DEVMEM_DEVICE_IMPORT *psDeviceImport;
+ RA_BASE_T uiAllocatedAddr;
+ RA_LENGTH_T uiAllocatedSize;
+ IMG_DEV_VIRTADDR sBase;
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+ IMG_UINT uiAlign;
+
+ /* Round the provided import alignment to the configured heap alignment */
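+	/* (x + a - 1) & ~(a - 1) rounds x up to the next multiple of a; this
+	   relies on the heap import alignment being a power of two. */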
+ uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+ uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+ psDeviceImport = &psImport->sDeviceImport;
+
+ OSLockAcquire(psDeviceImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psDeviceImport->ui32RefCount,
+ psDeviceImport->ui32RefCount+1);
+
+ if (psDeviceImport->ui32RefCount++ == 0)
+ {
+ _DevmemImportStructAcquire(psImport);
+
+ OSAtomicIncrement(&psHeap->hImportCount);
+
+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+ {
+ /* SVM (shared virtual memory) imports or allocations always
+ need to acquire CPU virtual address first as address is
+ used to map the allocation into the device virtual address
+ space; i.e. the virtual address of the allocation for both
+ the CPU/GPU must be identical. */
+ eError = _DevmemImportStructDevMapSVM(psHeap,
+ psImport,
+ uiAlign,
+ &ui64OptionalMapAddress);
+ if (eError != PVRSRV_OK)
+ {
+ goto failVMRAAlloc;
+ }
+ }
+
+ if (ui64OptionalMapAddress == 0)
+ {
+ if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ||
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+ "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().":
+ "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_RA_MANAGED;
+
+ /* Allocate space in the VM */
+ eError = RA_Alloc(psHeap->psQuantizedVMRA,
+ psImport->uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* flags: this RA doesn't use flags*/
+ uiAlign,
+ "Virtual_Alloc",
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ NULL /* don't care about per-import priv data */
+ );
+ if (PVRSRV_OK != eError)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
+ goto failVMRAAlloc;
+ }
+
+ /* No reason for the allocated virtual size to be different from
+ the PMR's size */
+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+ sBase.uiAddr = uiAllocatedAddr;
+
+ }
+ else
+ {
+ IMG_UINT64 uiHeapAddrEnd;
+
+ switch (psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_UNKNOWN:
+ /* DEVMEM_HEAP_TYPE_USER_MANAGED can apply to _any_
+ heap and can only be determined here. This heap
+ type transitions from DEVMEM_HEAP_TYPE_UNKNOWN
+ to DEVMEM_HEAP_TYPE_USER_MANAGED on 1st alloc */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ if (! psHeap->uiSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+ "%s: Heap DEVMEM_HEAP_TYPE_USER_MANAGED is disabled.":
+ "%s: Heap DEVMEM_HEAP_TYPE_KERNEL_MANAGED is disabled."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_HEAP;
+ goto failVMRAAlloc;
+ }
+ break;
+
+ case DEVMEM_HEAP_TYPE_RA_MANAGED:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This heap is managed by an RA, please use PVRSRVMapToDevice()"
+ " and don't use allocation flags that assume differently (e.g. SVM)."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+
+ default:
+ break;
+ }
+
+ /* Ensure supplied ui64OptionalMapAddress is within heap range */
+ uiHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+ if (ui64OptionalMapAddress >= uiHeapAddrEnd ||
+ ui64OptionalMapAddress + psImport->uiSize > uiHeapAddrEnd)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>."
+ , __func__
+ , (void*)(uintptr_t)ui64OptionalMapAddress
+ , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr
+ , (void*)(uintptr_t)uiHeapAddrEnd));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+
+ if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid address to map to. Please prove an address aligned to"
+ "a page multiple of the heap."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+
+ uiAllocatedAddr = ui64OptionalMapAddress;
+
+ if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid heap to map to. "
+ "Please choose a heap that can handle smaller page sizes."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+ uiAllocatedSize = psImport->uiSize;
+ sBase.uiAddr = uiAllocatedAddr;
+ }
+
+ /* Setup page tables for the allocated VM space */
+ eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap,
+ sBase,
+ uiAllocatedSize,
+ &hReservation);
+ if (eError != PVRSRV_OK)
+ {
+ goto failReserve;
+ }
+
+ if (bMap)
+ {
+ DEVMEM_FLAGS_T uiMapFlags;
+
+ uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+ /* Actually map the PMR to allocated VM space */
+ eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap,
+ hReservation,
+ psImport->hPMR,
+ uiMapFlags,
+ &psDeviceImport->hMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+ psDeviceImport->bMapped = IMG_TRUE;
+ }
+
+ /* Setup device mapping specific parts of the mapping info */
+ psDeviceImport->hReservation = hReservation;
+ psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+ psDeviceImport->psHeap = psHeap;
+ }
+ else
+ {
+ /*
+ Check that we've been asked to map it into the
+ same heap 2nd time around
+ */
+ if (psHeap != psDeviceImport->psHeap)
+ {
+ eError = PVRSRV_ERROR_INVALID_HEAP;
+ goto failParams;
+ }
+ }
+ OSLockRelease(psDeviceImport->hLock);
+
+ return PVRSRV_OK;
+
+failMap:
+ BridgeDevmemIntUnreserveRange(psHeap->psCtx->hDevConnection,
+ hReservation);
+failReserve:
+ if (ui64OptionalMapAddress == 0)
+ {
+ RA_Free(psHeap->psQuantizedVMRA,
+ uiAllocatedAddr);
+ }
+failVMRAAlloc:
+ _DevmemImportStructRelease(psImport);
+ OSAtomicDecrement(&psHeap->hImportCount);
+failParams:
+ psDeviceImport->ui32RefCount--;
+ OSLockRelease(psDeviceImport->hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ Unmap an import from the Device
+*/
+IMG_INTERNAL
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+ psDeviceImport = &psImport->sDeviceImport;
+
+ OSLockAcquire(psDeviceImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psDeviceImport->ui32RefCount,
+ psDeviceImport->ui32RefCount-1);
+
+ if (--psDeviceImport->ui32RefCount == 0)
+ {
+ DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+ if (psDeviceImport->bMapped)
+ {
+ eError = BridgeDevmemIntUnmapPMR(psImport->hDevConnection,
+ psDeviceImport->hMapping);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ eError = BridgeDevmemIntUnreserveRange(psImport->hDevConnection,
+ psDeviceImport->hReservation);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDeviceImport->bMapped = IMG_FALSE;
+ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+
+ if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_RA_MANAGED)
+ {
+ RA_Free(psHeap->psQuantizedVMRA,
+ psDeviceImport->sDevVAddr.uiAddr);
+ }
+
+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+ {
+ _DevmemImportStructDevUnmapSVM(psHeap, psImport);
+ }
+
+ OSLockRelease(psDeviceImport->hLock);
+
+ _DevmemImportStructRelease(psImport);
+
+ OSAtomicDecrement(&psHeap->hImportCount);
+ }
+ else
+ {
+ OSLockRelease(psDeviceImport->hLock);
+ }
+}
+
+/*
+ Map an import into the CPU
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_CPU_IMPORT *psCPUImport;
+ size_t uiMappingLength;
+
+ psCPUImport = &psImport->sCPUImport;
+
+ OSLockAcquire(psCPUImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psCPUImport->ui32RefCount,
+ psCPUImport->ui32RefCount+1);
+
+ if (psCPUImport->ui32RefCount++ == 0)
+ {
+ _DevmemImportStructAcquire(psImport);
+
+ eError = OSMMapPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ psImport->uiSize,
+ psImport->uiFlags,
+ &psCPUImport->hOSMMapData,
+ &psCPUImport->pvCPUVAddr,
+ &uiMappingLength);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ /* There is no reason the mapping length is different to the size */
+ PVR_ASSERT(uiMappingLength == psImport->uiSize);
+ }
+ OSLockRelease(psCPUImport->hLock);
+
+ return PVRSRV_OK;
+
+failMap:
+ psCPUImport->ui32RefCount--;
+ _DevmemImportStructRelease(psImport);
+ OSLockRelease(psCPUImport->hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ Unmap an import from the CPU
+*/
+IMG_INTERNAL
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+ DEVMEM_CPU_IMPORT *psCPUImport;
+
+ psCPUImport = &psImport->sCPUImport;
+
+ OSLockAcquire(psCPUImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psCPUImport->ui32RefCount,
+ psCPUImport->ui32RefCount-1);
+
+ if (--psCPUImport->ui32RefCount == 0)
+ {
+ /* FIXME: psImport->uiSize is a 64-bit quantity where as the 5th
+ * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems
+ * hence a compiler warning of implicit cast and loss of data.
+ * Added explicit cast and assert to remove warning.
+ */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+ PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+ OSMUnmapPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ psCPUImport->hOSMMapData,
+ psCPUImport->pvCPUVAddr,
+ psImport->uiSize);
+
+ OSLockRelease(psCPUImport->hLock);
+
+ _DevmemImportStructRelease(psImport);
+ }
+ else
+ {
+ OSLockRelease(psCPUImport->hLock);
+ }
+}
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management internal utility functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally by device memory management
+ code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_UTILS_H_
+#define _DEVICEMEM_UTILS_H_
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "mm_common.h"
+#include "devicemem_history_shared.h"
+#endif
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we
+ poison the entry with this value so that it's easily recognised in
+ the debugger. Note that this is potentially a valid handle, but
+ then so is NULL, which is no better, indeed worse, as it's not
+ obvious in the debugger. The value doesn't matter. We _never_ use
+ it (and because it's valid, we never assert it isn't this) but it's
+ nice to have a value in the source code that we can grep for when
+ things go wrong. */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+struct _DEVMEM_CONTEXT_ {
+
+ SHARED_DEV_CONNECTION hDevConnection;
+
+ /* Number of heaps that have been created in this context
+ (regardless of whether they have allocations) */
+ IMG_UINT32 uiNumHeaps;
+
+ /*
+ Each "DEVMEM_CONTEXT" has a counterpart in the server,
+ which is responsible for handling the mapping into device MMU.
+ We have a handle to that here.
+ */
+ IMG_HANDLE hDevMemServerContext;
+
+ /* Number of automagically created heaps in this context,
+ i.e. those that are born at context creation time from the
+ chosen "heap config" or "blueprint" */
+ IMG_UINT32 uiAutoHeapCount;
+
+ /* pointer to array of such heaps */
+ struct _DEVMEM_HEAP_ **ppsAutoHeapArray;
+
+ /* The cache line size for use when allocating memory, as it is not queryable on the client side */
+ IMG_UINT32 ui32CPUCacheLineSize;
+
+ /* Private data handle for device specific data */
+ IMG_HANDLE hPrivData;
+};
+
+
+typedef enum
+{
+ DEVMEM_HEAP_TYPE_UNKNOWN = 0,
+ DEVMEM_HEAP_TYPE_USER_MANAGED,
+ DEVMEM_HEAP_TYPE_KERNEL_MANAGED,
+ DEVMEM_HEAP_TYPE_RA_MANAGED,
+} DEVMEM_HEAP_TYPE;
+
+struct _DEVMEM_HEAP_ {
+ /* Name of heap - for debug and lookup purposes. */
+ IMG_CHAR *pszName;
+
+ /* Number of live imports in the heap */
+ ATOMIC_T hImportCount;
+
+ /*
+	 * Base address and size of the heap; clients need these because some
+	 * requesters cannot address the full range
+ */
+ IMG_DEV_VIRTADDR sBaseAddress;
+ DEVMEM_SIZE_T uiSize;
+
+	/* The heap type, describing whether the space is managed by the user or an RA */
+ DEVMEM_HEAP_TYPE eHeapType;
+
+ /* This RA is for managing sub-allocations in virtual space. Two
+	   more RAs will be used under the hood for managing the coarser
+ allocation of virtual space from the heap, and also for
+ managing the physical backing storage. */
+ RA_ARENA *psSubAllocRA;
+ IMG_CHAR *pszSubAllocRAName;
+ /*
+ This RA is for the coarse allocation of virtual space from the heap
+ */
+ RA_ARENA *psQuantizedVMRA;
+ IMG_CHAR *pszQuantizedVMRAName;
+
+ /* We also need to store a copy of the quantum size in order to
+ feed this down to the server */
+ IMG_UINT32 uiLog2Quantum;
+
+ /* Store a copy of the minimum import alignment */
+ IMG_UINT32 uiLog2ImportAlignment;
+
+ /* The relationship between tiled heap alignment and heap byte-stride
+ * (dependent on tiling mode, abstracted here) */
+ IMG_UINT32 uiLog2TilingStrideFactor;
+
+ /* The parent memory context for this heap */
+ struct _DEVMEM_CONTEXT_ *psCtx;
+
+ /* Lock to protect this structure */
+ POS_LOCK hLock;
+
+ /*
+ Each "DEVMEM_HEAP" has a counterpart in the server,
+ which is responsible for handling the mapping into device MMU.
+ We have a handle to that here.
+ */
+ IMG_HANDLE hDevMemServerHeap;
+};
+
+typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */
+#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */
+#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */
+#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */
+#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently unpinned? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */
+#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */
+
+
+typedef struct _DEVMEM_DEVICE_IMPORT_ {
+ DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */
+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */
+ IMG_HANDLE hReservation; /*!< Device memory reservation handle */
+ IMG_HANDLE hMapping; /*!< Device mapping handle */
+	IMG_BOOL bMapped; /*!< Is this import mapped? */
+ POS_LOCK hLock; /*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct _DEVMEM_CPU_IMPORT_ {
+ void *pvCPUVAddr; /*!< CPU virtual address of the import */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */
+ IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */
+ POS_LOCK hLock; /*!< Lock to protect the CPU import */
+} DEVMEM_CPU_IMPORT;
+
+typedef struct _DEVMEM_IMPORT_ {
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */
+ DEVMEM_SIZE_T uiSize; /*!< Size of import */
+ ATOMIC_T hRefCount; /*!< Refcount for this import */
+ DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if
+ it is exportable, pinned or suballocatable */
+ IMG_HANDLE hPMR; /*!< Handle to the PMR */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ POS_LOCK hLock; /*!< Lock to protect the import */
+
+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */
+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */
+#if defined(PDUMP)
+ IMG_CHAR *pszAnnotation;
+#endif
+} DEVMEM_IMPORT;
+
+typedef struct _DEVMEM_DEVICE_MEMDESC_ {
+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */
+ POS_LOCK hLock; /*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct _DEVMEM_CPU_MEMDESC_ {
+ void *pvCPUVAddr; /*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */
+ POS_LOCK hLock; /*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct _DEVMEM_MEMDESC_ {
+ DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */
+ IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */
+ IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */
+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */
+ POS_LOCK hLock; /*!< Lock to protect memdesc */
+ IMG_HANDLE hPrivData;
+
+ DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */
+ DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ DEVICEMEM_HISTORY_MEMDESC_DATA sTraceData;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */
+#endif
+};
+
+/* The physical descriptor used to store handles and information of
+ * device physical allocations. */
+struct _DEVMEMX_PHYS_MEMDESC_ {
+	IMG_UINT32 uiNumPages; /*!< Number of pages that the import has */
+ IMG_UINT32 uiLog2PageSize; /*!< Page size */
+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ IMG_HANDLE hPMR; /*!< Handle to the PMR */
+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */
+ DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */
+};
+
+/* The virtual descriptor used to store handles and information of a
+ * device virtual range and the mappings to it. */
+struct _DEVMEMX_VIRT_MEMDESC_ {
+	IMG_UINT32 uiNumPages; /*!< Number of pages that the import has */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */
+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ DEVICEMEM_HISTORY_MEMDESC_DATA sTraceData; /*!< To track mappings in this range */
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */
+#endif
+};
+
+#define DEVICEMEM_UTILS_NO_ADDRESS 0
+
+/******************************************************************************
+@Function _DevmemValidateParams
+@Description Check that the flags do not conflict, that the alignment is a
+ power of two and that the size is non-zero.
+
+@Input uiSize Size of the import.
+@Input uiAlign Alignment of the import.
+@Input puiFlags Pointer to the flags for the import.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T *puiFlags);
+
+/******************************************************************************
+@Function _DevmemImportStructAlloc
+@Description Allocates memory for an import struct. Does not allocate a PMR!
+ Creates locks for CPU and Devmem mappings.
+
+@Input hDevConnection Connection used for bridge calls from the import.
+@Input ppsImport The import to allocate.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function _DevmemImportStructInit
+@Description Initialises the import struct with the given parameters.
+ Sets its refcount to 1.
+
+@Input psImport The import to initialise.
+@Input uiSize Size of the import.
+@Input uiAlign Alignment of allocations in the import.
+@Input uiMapFlags Allocation flags to store on the import.
+@Input hPMR Reference to the PMR of this import struct.
+@Input uiProperties Properties of the import. Is it exportable,
+ imported, suballocatable, unpinned?
+******************************************************************************/
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE hPMR,
+ DEVMEM_PROPERTIES_T uiProperties);
+
+/******************************************************************************
+@Function _DevmemImportStructDevMap
+@Description NEVER call after the last _DevmemMemDescRelease()
+ Maps the PMR referenced by the import struct to the device's
+ virtual address space.
+ Does nothing but increase the device mapping refcount if the
+ import struct was already mapped.
+
+@Input psHeap The heap to map to.
+@Input bMap Caller can choose if the import should be really
+ mapped in the page tables or if just a virtual range
+ should be reserved and the refcounts increased.
+@Input psImport The import we want to map.
+@Input uiOptionalMapAddress An optional address to map to.
+ Pass DEVICEMEM_UTILS_NO_ADDRESS if not used.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+ IMG_BOOL bMap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 uiOptionalMapAddress);
+
+/******************************************************************************
+@Function _DevmemImportStructDevUnmap
+@Description Unmaps the PMR referenced by the import struct from the
+ device's virtual address space.
+ If this was not the last remaining device mapping on the import
+ struct, only the device mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructCPUMap
+@Description NEVER call after the last _DevmemMemDescRelease()
+ Maps the PMR referenced by the import struct to the CPU's
+ virtual address space.
+ Does nothing but increase the cpu mapping refcount if the
+ import struct was already mapped.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructCPUUnmap
+@Description Unmaps the PMR referenced by the import struct from the CPU's
+ virtual address space.
+ If this was not the last remaining CPU mapping on the import
+ struct only the cpu mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function _DevmemImportStructAcquire
+@Description Acquires an import struct by increasing its refcount.
+******************************************************************************/
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructRelease
+@Description Reduces the refcount of the import struct.
+ Destroys the import if this was the last reference.
+ Destroys the underlying PMR if this import was the last
+ reference to it.
+******************************************************************************/
+void _DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportDiscard
+@Description Discards a created but uninitialised import structure.
+ This must only be called before _DevmemImportStructInit
+ after which _DevmemImportStructRelease must be used to
+ "free" the import structure.
+******************************************************************************/
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemMemDescAlloc
+@Description Allocates a MemDesc and creates its various locks.
+ Zeroes the allocated memory.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescInit
+@Description Sets the given offset and import struct fields in the MemDesc.
+ Initialises refcount to 1 and other values to 0.
+
+@Input psMemDesc MemDesc to initialise.
+@Input uiOffset Offset in the import structure.
+@Input psImport Import the MemDesc is on.
+@Input uiAllocSize Size of the allocation
+******************************************************************************/
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiAllocSize);
+
+/******************************************************************************
+@Function _DevmemMemDescAcquire
+@Description Acquires the MemDesc by increasing its refcount.
+******************************************************************************/
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescRelease
+@Description Releases the MemDesc by reducing its refcount.
+ Destroys the MemDesc if its refcount reaches 0.
+ Destroys the import struct the MemDesc is on if that was the
+ last MemDesc on the import, which in turn may destroy the
+ underlying PMR.
+******************************************************************************/
+void _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescDiscard
+@Description Discards a created but uninitialised MemDesc structure.
+ This must only be called before _DevmemMemDescInit
+ after which _DevmemMemDescRelease must be used to
+ "free" the MemDesc structure.
+******************************************************************************/
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
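+
+/* Illustrative lifecycle sketch (informational only, compiled out). The call
+ * sequence below is an assumption drawn from the declarations above; the
+ * "Example"-prefixed names are hypothetical and real callers live in
+ * devicemem.c.
+ */
+#if 0
+static PVRSRV_ERROR ExampleImportLifecycle(SHARED_DEV_CONNECTION hDevConnection,
+                                           IMG_HANDLE hPMR,
+                                           IMG_DEVMEM_SIZE_T uiSize,
+                                           IMG_DEVMEM_ALIGN_T uiAlign,
+                                           DEVMEM_FLAGS_T uiFlags)
+{
+	DEVMEM_IMPORT *psImport;
+	DEVMEM_MEMDESC *psMemDesc;
+	PVRSRV_ERROR eError;
+
+	/* 1. Validate size, alignment and flags before anything is allocated */
+	eError = _DevmemValidateParams(uiSize, uiAlign, &uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* 2. Allocate an import and initialise it around the PMR (refcount 0 -> 1) */
+	eError = _DevmemImportStructAlloc(hDevConnection, &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	_DevmemImportStructInit(psImport, uiSize, uiAlign, uiFlags, hPMR, 0);
+
+	/* 3. Allocate a MemDesc and attach it to the import at offset 0 */
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		_DevmemImportStructRelease(psImport);
+		return eError;
+	}
+	_DevmemMemDescInit(psMemDesc, 0, psImport, uiSize);
+
+	/* 4. Releasing the last MemDesc reference releases the import as well */
+	_DevmemMemDescRelease(psMemDesc);
+
+	return PVRSRV_OK;
+}
+#endif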
+
+#endif /* _DEVICEMEM_UTILS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title X Device Memory Management core internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface for extended device memory management.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEMX_H
+#define DEVICEMEMX_H
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "devicemem_utils.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "osfunc.h"
+
+/* DevmemXAllocPhysical()
+ *
+ * Allocate physical device memory and return a physical
+ * descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocPhysical(DEVMEM_CONTEXT *psCtx,
+ IMG_UINT32 uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+/* DevmemXReleasePhysical()
+ *
+ * Removes a physical device allocation if all references
+ * to it are dropped, otherwise just decreases the refcount.
+ */
+void
+DevmemXReleasePhysical(DEVMEMX_PHYSDESC *psPhysDesc);
+
+/* DevmemXAllocVirtual()
+ *
+ * Allocate and reserve a device virtual range and return
+ * a virtual descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocVirtual(DEVMEM_HEAP* hHeap,
+ IMG_UINT32 uiNumPages,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEMX_VIRTDESC **ppsVirtDesc,
+ IMG_DEV_VIRTADDR *psVirtAddr);
+
+/* DevmemXFreeVirtual()
+ *
+ * Removes a device virtual range if all mappings on it
+ * have been removed.
+ */
+PVRSRV_ERROR
+DevmemXFreeVirtual(DEVMEMX_VIRTDESC *psVirtDesc);
+
+/* DevmemXMapVirtualRange()
+ *
+ * Map memory from a physical descriptor into a
+ * virtual range.
+ */
+PVRSRV_ERROR
+DevmemXMapVirtualRange(IMG_UINT32 ui32PageCount,
+ DEVMEMX_PHYSDESC *psPhysDesc,
+ IMG_UINT32 ui32PhysOffset,
+ DEVMEMX_VIRTDESC *psVirtDesc,
+ IMG_UINT32 ui32VirtOffset);
+
+/* DevmemXUnmapVirtualRange()
+ *
+ * Unmap pages from a device virtual range.
+ */
+PVRSRV_ERROR
+DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount,
+ DEVMEMX_VIRTDESC *psVirtDesc,
+ IMG_UINT32 ui32VirtPgOffset);
+
+/* DevmemXMapPhysicalToCPU()
+ *
+ * Map a full physical descriptor to CPU space.
+ */
+PVRSRV_ERROR
+DevmemXMapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys,
+ IMG_CPU_VIRTADDR *psVirtAddr);
+
+/* DevmemXUnmapPhysicalToCPU()
+ *
+ * Remove the CPU mapping from the descriptor.
+ */
+PVRSRV_ERROR
+DevmemXUnmapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys);
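+
+/* Illustrative usage sketch (informational only, compiled out). It strings the
+ * calls above together in the order the comments describe; the flag values and
+ * "Example"-prefixed names are placeholders, not part of this interface.
+ */
+#if 0
+static PVRSRV_ERROR ExampleDevmemXUsage(DEVMEM_CONTEXT *psCtx, DEVMEM_HEAP *psHeap)
+{
+	DEVMEMX_PHYSDESC *psPhysDesc;
+	DEVMEMX_VIRTDESC *psVirtDesc;
+	IMG_DEV_VIRTADDR sVirtAddr;
+	PVRSRV_ERROR eError;
+
+	/* Physical backing: 4 pages of 4KB (log2 page size 12) */
+	eError = DevmemXAllocPhysical(psCtx, 4, 12, 0, "ExamplePhys", &psPhysDesc);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Reserve a device virtual range of the same number of pages */
+	eError = DevmemXAllocVirtual(psHeap, 4, 0, "ExampleVirt",
+	                             &psVirtDesc, &sVirtAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_virt;
+	}
+
+	/* Map all 4 physical pages at the start of the virtual range */
+	eError = DevmemXMapVirtualRange(4, psPhysDesc, 0, psVirtDesc, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_map;
+	}
+
+	/* ... use the memory, then tear everything down again ... */
+
+	DevmemXUnmapVirtualRange(4, psVirtDesc, 0);
+fail_map:
+	DevmemXFreeVirtual(psVirtDesc);
+fail_virt:
+	DevmemXReleasePhysical(psPhysDesc);
+	return eError;
+}
+#endif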
+
+/* DevmemXCreateDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Create a devmem memdesc from a physical and
+ * virtual descriptor.
+ * Always destroy with DevmemXFreeDevmemMemDesc().
+ */
+
+PVRSRV_ERROR
+DevmemXCreateDevmemMemDesc(DEVMEMX_VIRTDESC *psVirtDesc,
+ DEVMEM_MEMDESC **ppsMemDesc);
+
+/* DevmemXFreeDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Free the memdesc again. Has no impact on the underlying
+ * physical and virtual descriptors.
+ */
+PVRSRV_ERROR
+DevmemXFreeDevmemMemDesc(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+_DevmemXFlagCompatibilityCheck(IMG_UINT32 uiPhysFlags,
+ IMG_UINT32 uiVirtFlags);
+
+PVRSRV_ERROR
+_DevmemXPhysDescAlloc(DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+void
+_DevmemXPhysDescInit(DEVMEMX_PHYSDESC *psPhysDesc,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_HANDLE hBridge);
+
+void
+_DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc);
+
+#endif /* DEVICEMEMX_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Shared X device memory management PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common (client & server) PDump functions for the
+ memory management code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if defined(PDUMP)
+
+#include "devicememx_pdump.h"
+#include "pdump.h"
+#include "client_pdumpmm_bridge.h"
+#include "devicemem_utils.h"
+
+IMG_INTERNAL void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiSize != 0);
+ PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages << psMemDescPhys->uiLog2PageSize));
+
+ eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge,
+ psMemDescPhys->hPMR,
+ uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+}
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title X Device Memory Management PDump internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to PDump device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEMX_PDUMP_H_
+#define _DEVICEMEMX_PDUMP_H_
+
+#include "devicememx.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemXPDumpLoadMem()
+ *
+ * Same as DevmemPDumpLoadMem().
+ */
+extern void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemXPDumpLoadMem)
+#endif
+
+static INLINE void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDescPhys);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+#endif /* PDUMP */
+#endif /* _DEVICEMEMX_PDUMP_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Double linked list header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Double linked list interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DLLIST_
+#define _DLLIST_
+
+#include "img_types.h"
+
+/*!
+ Pointer to a linked list node
+*/
+typedef struct _DLLIST_NODE_ *PDLLIST_NODE;
+
+
+/*!
+ Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and
+ * clients may need to create a mirror of the structure definition if it needs
+ * to be used in a structure shared between host and device. Consider such
+ * clients if any changes are made to this structure.
+ */
+typedef struct _DLLIST_NODE_
+{
+ struct _DLLIST_NODE_ *psPrevNode;
+ struct _DLLIST_NODE_ *psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+ Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE n = {&n, &n}
+
+
+/*************************************************************************/ /*!
+@Function dllist_init
+
+@Description Initialize a new double linked list
+
+@Input psListHead List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_init(PDLLIST_NODE psListHead)
+{
+ psListHead->psPrevNode = psListHead;
+ psListHead->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_is_empty
+
+@Description Returns whether the list is empty
+
+@Input psListHead List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_is_empty(PDLLIST_NODE psListHead)
+{
+ return (IMG_BOOL) ((psListHead->psPrevNode == psListHead)
+ && (psListHead->psNextNode == psListHead));
+}
+
+/*************************************************************************/ /*!
+@Function dllist_add_to_head
+
+@Description Add psNewNode to head of list psListHead
+
+@Input psListHead Head Node
+@Input psNewNode New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+ PDLLIST_NODE psTmp;
+
+ psTmp = psListHead->psNextNode;
+
+ psListHead->psNextNode = psNewNode;
+ psNewNode->psNextNode = psTmp;
+
+ psTmp->psPrevNode = psNewNode;
+ psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function dllist_add_to_tail
+
+@Description Add psNewNode to tail of list psListHead
+
+@Input psListHead Head Node
+@Input psNewNode New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+ PDLLIST_NODE psTmp;
+
+ psTmp = psListHead->psPrevNode;
+
+ psListHead->psPrevNode = psNewNode;
+ psNewNode->psPrevNode = psTmp;
+
+ psTmp->psNextNode = psNewNode;
+ psNewNode->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_node_is_in_list
+
+@Description Returns IMG_TRUE if psNode is in a list
+
+@Input psNode List node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+ return (IMG_BOOL) (psNode->psNextNode != 0);
+}
+
+/*************************************************************************/ /*!
+@Function dllist_get_next_node
+
+@Description Returns the list node after psListHead, or NULL if psListHead
+ is the only element in the list.
+
+@Input psListHead List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+ if (psListHead->psNextNode == psListHead)
+ {
+ return NULL;
+ }
+ else
+ {
+ return psListHead->psNextNode;
+ }
+}
+
+
+/*************************************************************************/ /*!
+@Function dllist_remove_node
+
+@Description Removes psListNode from the list where it currently belongs
+
+@Input psListNode List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_remove_node(PDLLIST_NODE psListNode)
+{
+ psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+ psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+ /* Clear the node to show it's not on a list */
+ psListNode->psPrevNode = 0;
+ psListNode->psNextNode = 0;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_replace_head
+
+@Description Moves the list from psOldHead to psNewHead
+
+@Input psOldHead List node to be replaced. Will become a head
+ node of an empty list.
+@Input psNewHead List node to be inserted. Must be an empty list
+ head.
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
+{
+ if (dllist_is_empty(psOldHead))
+ {
+ psNewHead->psNextNode = psNewHead;
+ psNewHead->psPrevNode = psNewHead;
+ }
+ else
+ {
+ /* Change the neighbouring nodes */
+ psOldHead->psNextNode->psPrevNode = psNewHead;
+ psOldHead->psPrevNode->psNextNode = psNewHead;
+
+ /* Copy the old data to the new node */
+ psNewHead->psNextNode = psOldHead->psNextNode;
+ psNewHead->psPrevNode = psOldHead->psPrevNode;
+
+ /* Remove links to the previous list */
+ psOldHead->psNextNode = psOldHead;
+ psOldHead->psPrevNode = psOldHead;
+ }
+
+}
+
+/*************************************************************************/ /*!
+@Function dllist_foreach_node
+
+@Description Walk through all the nodes on the list
+
+@Input list_head List node to start the operation
+@Input node Current list node
+@Input next Node after the current one
+
+*/
+/*****************************************************************************/
+#define dllist_foreach_node(list_head, node, next) \
+ for (node = (list_head)->psNextNode, next = (node)->psNextNode; \
+ node != (list_head); \
+ node = next, next = (node)->psNextNode)
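+
+/* Illustrative usage sketch (informational only, compiled out). The list is
+ * intrusive: the DLLIST_NODE is embedded in the client structure and recovered
+ * with a container-of style macro. EXAMPLE_ITEM and IMG_CONTAINER_OF are
+ * assumptions made for the purpose of the example, not part of this interface.
+ */
+#if 0
+typedef struct _EXAMPLE_ITEM_
+{
+	IMG_UINT32 ui32Value;
+	DLLIST_NODE sListNode;   /* Intrusive linkage into the example list */
+} EXAMPLE_ITEM;
+
+static void ExamplePruneZeroItems(PDLLIST_NODE psListHead)
+{
+	PDLLIST_NODE psNode, psNext;
+
+	/* dllist_foreach_node keeps a pointer to the next node, so removing the
+	   current node during the walk is safe */
+	dllist_foreach_node(psListHead, psNode, psNext)
+	{
+		EXAMPLE_ITEM *psItem = IMG_CONTAINER_OF(psNode, EXAMPLE_ITEM, sListNode);
+
+		if (psItem->ui32Value == 0)
+		{
+			dllist_remove_node(&psItem->sListNode);
+		}
+	}
+}
+#endif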
+
+#endif /* _DLLIST_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Linux specific server side connection management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_ENV_CONNECTION_H_)
+#define _ENV_CONNECTION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+#include "device.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+typedef struct _ENV_CONNECTION_PRIVATE_DATA_
+{
+ struct file *psFile;
+ PVRSRV_DEVICE_NODE *psDevNode;
+} ENV_CONNECTION_PRIVATE_DATA;
+
+#if defined(SUPPORT_ION)
+#define ION_CLIENT_NAME_SIZE 50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+ IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+ struct ion_device *psIonDev;
+ struct ion_client *psIonClient;
+ IMG_UINT32 ui32IonClientRefCount;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+ pid_t owner;
+
+ struct file *psFile;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(SUPPORT_ION)
+ ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+#if defined(SUPPORT_DRM_EXT)
+ void *pPriv;
+#endif
+} ENV_CONNECTION_DATA;
+
+#if defined(SUPPORT_ION)
+static inline struct ion_client *EnvDataIonClientAcquire(ENV_CONNECTION_DATA *psEnvData)
+{
+ PVR_ASSERT(psEnvData->psIonData != NULL);
+ PVR_ASSERT(psEnvData->psIonData->psIonClient != NULL);
+ PVR_ASSERT(psEnvData->psIonData->ui32IonClientRefCount > 0);
+ psEnvData->psIonData->ui32IonClientRefCount++;
+ return psEnvData->psIonData->psIonClient;
+}
+
+static inline void EnvDataIonClientRelease(ENV_ION_CONNECTION_DATA *psIonData)
+{
+ PVR_ASSERT(psIonData != NULL);
+ PVR_ASSERT(psIonData->psIonClient != NULL);
+ PVR_ASSERT(psIonData->ui32IonClientRefCount > 0);
+ if (--psIonData->ui32IonClientRefCount == 0)
+ {
+ ion_client_destroy(psIonData->psIonClient);
+ IonDevRelease(psIonData->psIonDev);
+ OSFreeMem(psIonData);
+ /* Note: psIonData is a copy on the stack; the caller's pointer is not cleared here. */
+ }
+}
+#endif /* defined(SUPPORT_ION) */
+
+#endif /* !defined(_ENV_CONNECTION_H_) */
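The two inline helpers above implement a simple manual reference count on the per-connection ion client. A sketch of the expected acquire/release pairing, assuming a caller that already holds an ENV_CONNECTION_DATA; ExampleIonUser() is a placeholder, not a driver function:

#if defined(SUPPORT_ION)
static PVRSRV_ERROR ExampleIonUser(ENV_CONNECTION_DATA *psEnvData)
{
	/* Take a reference; the returned client stays valid until released. */
	struct ion_client *psIonClient = EnvDataIonClientAcquire(psEnvData);

	/* ... import or export buffers through psIonClient ... */
	(void)psIonClient;

	/* Drop the reference; the client and its ion device reference are
	 * only destroyed when the count reaches zero. */
	EnvDataIonClientRelease(psEnvData->psIonData);

	return PVRSRV_OK;
}
#endif /* SUPPORT_ION */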
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Event Object
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+
+#include "osfunc.h"
+
+/* Returns a pointer to the task_struct of the thread that currently holds
+ * the bridge lock. */
+extern struct task_struct *BridgeLockGetOwner(void);
+extern IMG_BOOL BridgeLockIsLocked(void);
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+ rwlock_t sLock;
+ struct list_head sList;
+
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+ atomic_t sTimeStamp;
+ IMG_UINT32 ui32TimeStampPrevious;
+#if defined(DEBUG)
+ IMG_UINT ui32Stats;
+#endif
+ wait_queue_head_t sWait;
+ struct list_head sList;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output phEventObjectList : Pointer to the event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList;
+
+ psEventObjectList = OSAllocMem(sizeof(*psEventObjectList));
+ if (psEventObjectList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListCreate: failed to allocate memory for event list"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psEventObjectList->sList);
+
+ rwlock_init(&psEventObjectList->sLock);
+
+ *phEventObjectList = (IMG_HANDLE) psEventObjectList;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input hEventObjectList : Event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+ if (psEventObjectList)
+ {
+ if (!list_empty(&psEventObjectList->sList))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event list is not empty"));
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+ OSFreeMem(psEventObjectList);
+ /* not nulling pointer, copy on stack */
+ }
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input hOSEventObject : Event object handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+ if (hOSEventObject)
+ {
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+ write_lock_bh(&psLinuxEventObjectList->sLock);
+ list_del(&psLinuxEventObject->sList);
+ write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#if defined(DEBUG)
+// PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+ OSFreeMem(psLinuxEventObject);
+ /* not nulling pointer, copy on stack */
+
+ return PVRSRV_OK;
+ }
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input hOSEventObjectList : Event object list handle
+ @Output phOSEventObject : Pointer to the event object handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+ /* Allocate the event object */
+ psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject));
+ if (psLinuxEventObject == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
+ psLinuxEventObject->ui32TimeStampPrevious = 0;
+
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats = 0;
+#endif
+ init_waitqueue_head(&psLinuxEventObject->sWait);
+
+ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+ write_lock_bh(&psLinuxEventObjectList->sLock);
+ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+ write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+ *phOSEventObject = psLinuxEventObject;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signaling function
+
+ @Input hOSEventObjectList : Event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+ struct list_head *psListEntry, *psListEntryTemp, *psList;
+ psList = &psLinuxEventObjectList->sList;
+
+ read_lock_bh(&psLinuxEventObjectList->sLock);
+ list_for_each_safe(psListEntry, psListEntryTemp, psList)
+ {
+
+ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+
+ atomic_inc(&psLinuxEventObject->sTimeStamp);
+ wake_up_interruptible(&psLinuxEventObject->sWait);
+ }
+ read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+ return PVRSRV_OK;
+
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input hOSEventObject : Event object handle
+
+ @Input ui64Timeoutus : Timeout value in usec
+
+ @Input bHoldBridgeLock : Keep the bridge lock held while waiting
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT64 ui64Timeoutus, IMG_BOOL bHoldBridgeLock)
+{
+ IMG_UINT32 ui32TimeStamp;
+ IMG_BOOL bReleasePVRLock;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_UINT32 ui32Remainder;
+ long timeOutJiffies;
+ DEFINE_WAIT(sWait);
+
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+ /* Check that the driver is in good shape */
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* usecs_to_jiffies only takes a uint, so if our timeout is bigger than a
+ * uint, use the msec version. With such a long timeout we really don't need
+ * the high resolution of usecs. */
+ if (ui64Timeoutus > 0xffffffffULL)
+ timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder));
+ else
+ timeOutJiffies = usecs_to_jiffies(ui64Timeoutus);
+
+ do
+ {
+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+ ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);
+
+ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
+ {
+ break;
+ }
+
+ /* Check that this thread holds the PVR/bridge lock before obeying the
+ * 'release before deschedule' behaviour. Some threads choose not to
+ * hold the bridge lock in their implementation.
+ */
+ bReleasePVRLock = (!bHoldBridgeLock && BridgeLockIsLocked() && current == BridgeLockGetOwner());
+ if (bReleasePVRLock == IMG_TRUE)
+ {
+ OSReleaseBridgeLock();
+ }
+
+ timeOutJiffies = schedule_timeout(timeOutJiffies);
+
+ if (bReleasePVRLock == IMG_TRUE)
+ {
+ OSAcquireBridgeLock();
+ }
+
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats++;
+#endif
+
+
+ } while (timeOutJiffies);
+
+ finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
+
+ return timeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+
+}
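Taken together, the five entry points above form a small signal/wait API: a shared list is created once, each waiter adds its own Linux event object to it, a producer signals the list, and waiters block on their own object. A sketch of the intended flow, with made-up handle names:

static PVRSRV_ERROR ExampleEventFlow(void)
{
	IMG_HANDLE hEventList;
	IMG_HANDLE hEvent;
	PVRSRV_ERROR eError;

	eError = LinuxEventObjectListCreate(&hEventList);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Each waiter registers its own object on the shared list. */
	eError = LinuxEventObjectAdd(hEventList, &hEvent);
	if (eError != PVRSRV_OK)
	{
		goto err_destroy_list;
	}

	/* Producer side: bump every registered waiter's timestamp and wake it. */
	LinuxEventObjectSignal(hEventList);

	/* Consumer side: returns PVRSRV_OK once the timestamp has moved on,
	 * or PVRSRV_ERROR_TIMEOUT after 100000 us without a signal. */
	eError = LinuxEventObjectWait(hEvent, 100000, IMG_FALSE);

	LinuxEventObjectDelete(hEvent);
err_destroy_list:
	LinuxEventObjectListDestroy(hEventList);
	return eError;
}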
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Event Object
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _EVENT_H_
+#define _EVENT_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT64 ui64Timeoutus, IMG_BOOL bHoldBridgeLock);
+
+#endif /* _EVENT_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for breakpoint
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for breakpoint
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_BREAKPOINT_BRIDGE_H
+#define COMMON_BREAKPOINT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST 0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXSetBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+ IMG_UINT32 eFWDataMaster;
+ IMG_UINT32 ui32BreakpointAddr;
+ IMG_UINT32 ui32HandlerAddr;
+ IMG_UINT32 ui32DM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+
+/*******************************************
+ RGXClearBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+
+/*******************************************
+ RGXEnableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+
+/*******************************************
+ RGXDisableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+
+/*******************************************
+ RGXOverallocateBPRegisters
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+ IMG_UINT32 ui32TempRegs;
+ IMG_UINT32 ui32SharedRegs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+
+#endif /* COMMON_BREAKPOINT_BRIDGE_H */
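The structures above are plain packed argument blocks: the caller fills the IN structure and reads eError back from the OUT structure after the bridge call returns. A sketch of that shape, where SubmitBridgeCall() is a hypothetical stand-in for the driver's actual bridge transport and the addresses are arbitrary example values:

static PVRSRV_ERROR ExampleSetBreakpoint(IMG_HANDLE hPrivData)
{
	PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT  sIn;
	PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT sOut;

	sIn.hPrivData          = hPrivData;
	sIn.eFWDataMaster      = 0;       /* firmware data master (example) */
	sIn.ui32BreakpointAddr = 0x1000;  /* example breakpoint address */
	sIn.ui32HandlerAddr    = 0x2000;  /* example handler address */
	sIn.ui32DM             = 0;       /* example data master index */

	/* The command offset selects PVRSRVBridgeRGXSetBreakpoint() in the
	 * server dispatch table; SubmitBridgeCall() is hypothetical. */
	SubmitBridgeCall(PVRSRV_BRIDGE_BREAKPOINT,
			 PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT,
			 &sIn, sizeof(sIn), &sOut, sizeof(sOut));

	return sOut.eError;
}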
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for breakpoint
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for breakpoint
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+
+#include "common_breakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXSetBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXSetBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetBreakpoint_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetBreakpointOUT->eError =
+ PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ psRGXSetBreakpointIN->eFWDataMaster,
+ psRGXSetBreakpointIN->ui32BreakpointAddr,
+ psRGXSetBreakpointIN->ui32HandlerAddr,
+ psRGXSetBreakpointIN->ui32DM);
+
+
+
+
+RGXSetBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXClearBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXClearBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXClearBreakpoint_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXClearBreakpointOUT->eError =
+ PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXClearBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXEnableBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXEnableBreakpoint_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXEnableBreakpointOUT->eError =
+ PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXEnableBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXDisableBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXDisableBreakpoint_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXDisableBreakpointOUT->eError =
+ PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXDisableBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN,
+ PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXOverallocateBPRegistersOUT->eError =
+ PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevData(psConnection),
+ psRGXOverallocateBPRegistersIN->ui32TempRegs,
+ psRGXOverallocateBPRegistersIN->ui32SharedRegs);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+
+/*
+ * Register all BREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitBREAKPOINTBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT, PVRSRVBridgeRGXSetBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT, PVRSRVBridgeRGXClearBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT, PVRSRVBridgeRGXEnableBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT, PVRSRVBridgeRGXDisableBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS, PVRSRVBridgeRGXOverallocateBPRegisters,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all breakpoint functions with services
+ */
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void)
+{
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_BREAKPOINT_BRIDGE */
+/* This bridge is conditional on EXCLUDE_BREAKPOINT_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitBREAKPOINTBridge() \
+ PVRSRV_OK
+
+#define DeinitBREAKPOINTBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
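The #else branch above turns the init/deinit entry points into macros that simply evaluate to PVRSRV_OK, so a registration routine can call every bridge unconditionally. A minimal sketch of that idea; RegisterExampleBridges() is a placeholder, not a function in this patch:

static PVRSRV_ERROR RegisterExampleBridges(void)
{
	PVRSRV_ERROR eError;

	/* Compiles and returns PVRSRV_OK even when the breakpoint bridge is
	 * excluded, because the call collapses to the macro defined above. */
	eError = InitBREAKPOINTBridge();
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... initialise the remaining bridges in the same way ... */

	return PVRSRV_OK;
}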
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *piuCacheOp,
+ IMG_UINT32 *pui32CacheOpSeqNum);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpSetTimeline(IMG_HANDLE hBridge,
+ IMG_INT32 i32OpTimeline);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_INT64 i64QueuedTimeUs,
+ IMG_INT64 i64ExecuteTimeUs,
+ PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpGetLineSize(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32L1DataCacheLineSize);
+
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
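A usage sketch of the client entry points declared above. The handles are placeholders obtained elsewhere in services, and PVRSRV_CACHE_OP_FLUSH is assumed to be one of the operation values from cache_ops.h:

static PVRSRV_ERROR ExampleFlushPMRRange(IMG_HANDLE hBridge,
					 IMG_HANDLE hPMR,
					 IMG_DEVMEM_OFFSET_T uiOffset,
					 IMG_DEVMEM_SIZE_T uiSize)
{
	IMG_UINT32 ui32LineSize;
	PVRSRV_ERROR eError;

	/* Query the L1 data cache line size, e.g. to align the range. */
	eError = BridgeCacheOpGetLineSize(hBridge, &ui32LineSize);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Run a single maintenance operation on the given PMR range. */
	return BridgeCacheOpExec(hBridge, hPMR, uiOffset, uiSize,
				 PVRSRV_CACHE_OP_FLUSH);
}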
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *piuCacheOp,
+ IMG_UINT32 *pui32CacheOpSeqNum)
+{
+ PVRSRV_ERROR eError;
+ PMR * *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR **) phPMR;
+
+ eError =
+ CacheOpQueue(
+ ui32NumCacheOps,
+ psPMRInt,
+ puiOffset,
+ puiSize,
+ piuCacheOp,
+ pui32CacheOpSeqNum);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP iuCacheOp)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ CacheOpExec(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ iuCacheOp);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpSetTimeline(IMG_HANDLE hBridge,
+ IMG_INT32 i32OpTimeline)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ CacheOpSetTimeline(
+ i32OpTimeline);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_INT64 i64QueuedTimeUs,
+ IMG_INT64 i64ExecuteTimeUs,
+ PVRSRV_CACHE_OP iuCacheOp)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ CacheOpLog(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ i64QueuedTimeUs,
+ i64ExecuteTimeUs,
+ iuCacheOp);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpGetLineSize(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32L1DataCacheLineSize)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ CacheOpGetLineSize(
+ pui32L1DataCacheLineSize);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPSETTIMELINE PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_CACHE_CACHEOPGETLINESIZE PVRSRV_BRIDGE_CACHE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+4)
+
+
+/*******************************************
+ CacheOpQueue
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+ IMG_UINT32 ui32NumCacheOps;
+ IMG_HANDLE * phPMR;
+ IMG_DEVMEM_OFFSET_T * puiOffset;
+ IMG_DEVMEM_SIZE_T * puiSize;
+ PVRSRV_CACHE_OP * piuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+ IMG_UINT32 ui32CacheOpSeqNum;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+
+/*******************************************
+ CacheOpExec
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+
+/*******************************************
+ CacheOpSetTimeline
+ *******************************************/
+
+/* Bridge in structure for CacheOpSetTimeline */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPSETTIMELINE_TAG
+{
+ IMG_INT32 i32OpTimeline;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPSETTIMELINE;
+
+/* Bridge out structure for CacheOpSetTimeline */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPSETTIMELINE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPSETTIMELINE;
+
+
+/*******************************************
+ CacheOpLog
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_INT64 i64QueuedTimeUs;
+ IMG_INT64 i64ExecuteTimeUs;
+ PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+
+/*******************************************
+ CacheOpGetLineSize
+ *******************************************/
+
+/* Bridge in structure for CacheOpGetLineSize */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPGETLINESIZE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPGETLINESIZE;
+
+/* Bridge out structure for CacheOpGetLineSize */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPGETLINESIZE_TAG
+{
+ IMG_UINT32 ui32L1DataCacheLineSize;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPGETLINESIZE;
+
+
+#endif /* COMMON_CACHE_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR * *psPMRInt = NULL;
+ IMG_HANDLE *hPMRInt2 = NULL;
+ IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+ IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+ PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psCacheOpQueueIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto CacheOpQueue_exit;
+ }
+ }
+ }
+
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ psPMRInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+ hPMRInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hPMRInt2, psCacheOpQueueIN->phPMR, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ uiOffsetInt = (IMG_DEVMEM_OFFSET_T*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiOffsetInt, psCacheOpQueueIN->puiOffset, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ uiSizeInt = (IMG_DEVMEM_SIZE_T*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiSizeInt, psCacheOpQueueIN->puiSize, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ iuCacheOpInt = (PVRSRV_CACHE_OP*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+ {
+ if ( OSCopyFromUser(NULL, iuCacheOpInt, psCacheOpQueueIN->piuCacheOp, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psCacheOpQueueIN->ui32NumCacheOps;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psCacheOpQueueOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt[i],
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpQueueOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpQueue_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psCacheOpQueueOUT->eError =
+ CacheOpQueue(
+ psCacheOpQueueIN->ui32NumCacheOps,
+ psPMRInt,
+ uiOffsetInt,
+ uiSizeInt,
+ iuCacheOpInt,
+ &psCacheOpQueueOUT->ui32CacheOpSeqNum);
+
+
+
+
+CacheOpQueue_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ /* psPMRInt is NULL if the argument buffer allocation failed above,
+ * in which case there are no looked up handles to release. */
+ if (psPMRInt != NULL)
+ {
+ for (i=0;i<psCacheOpQueueIN->ui32NumCacheOps;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psCacheOpExecOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpExecOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpExec_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psCacheOpExecOUT->eError =
+ CacheOpExec(
+ psPMRInt,
+ psCacheOpExecIN->uiOffset,
+ psCacheOpExecIN->uiSize,
+ psCacheOpExecIN->iuCacheOp);
+
+
+
+
+CacheOpExec_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpSetTimeline(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPSETTIMELINE *psCacheOpSetTimelineIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPSETTIMELINE *psCacheOpSetTimelineOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+ psCacheOpSetTimelineOUT->eError =
+ CacheOpSetTimeline(
+ psCacheOpSetTimelineIN->i32OpTimeline);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psCacheOpLogOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpLogOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpLog_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psCacheOpLogOUT->eError =
+ CacheOpLog(
+ psPMRInt,
+ psCacheOpLogIN->uiOffset,
+ psCacheOpLogIN->uiSize,
+ psCacheOpLogIN->i64QueuedTimeUs,
+ psCacheOpLogIN->i64ExecuteTimeUs,
+ psCacheOpLogIN->iuCacheOp);
+
+
+
+
+CacheOpLog_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpGetLineSize(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPGETLINESIZE *psCacheOpGetLineSizeIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPGETLINESIZE *psCacheOpGetLineSizeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psCacheOpGetLineSizeIN);
+
+
+
+
+
+ psCacheOpGetLineSizeOUT->eError =
+ CacheOpGetLineSize(
+ &psCacheOpGetLineSizeOUT->ui32L1DataCacheLineSize);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
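+/* Passed as the final argument of each SetDispatchTableEntry() call below;
+ * it appears to select whether the entry is dispatched with the global
+ * bridge lock held.
+ */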
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, PVRSRVBridgeCacheOpQueue,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, PVRSRVBridgeCacheOpExec,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPSETTIMELINE, PVRSRVBridgeCacheOpSetTimeline,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, PVRSRVBridgeCacheOpLog,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPGETLINESIZE, PVRSRVBridgeCacheOpGetLineSize,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cache functions from services
+ */
+PVRSRV_ERROR DeinitCACHEBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+
+/*******************************************
+ DevmemIntExportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+ IMG_HANDLE hContext;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+ IMG_HANDLE hContextExport;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+
+/*******************************************
+ DevmemIntUnexportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+ IMG_HANDLE hContextExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+
+/*******************************************
+ DevmemIntAcquireRemoteCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+ IMG_HANDLE hContext;
+ IMG_HANDLE hPrivData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+
+#endif /* COMMON_CMM_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+ DEVMEMINT_CTX * psContextInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_CTX_EXPORT * psContextExportInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntExportCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psContextInt,
+ hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntExportCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntExportCtxOUT->eError =
+ DevmemIntExportCtx(
+ psContextInt,
+ psPMRInt,
+ &psContextExportInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntExportCtx_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntExportCtxOUT->hContextExport,
+ (void *) psContextExportInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&DevmemIntUnexportCtx);
+ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntExportCtx_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
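+ /* If an error occurred after DevmemIntExportCtx produced an export that
+  * no handle has taken ownership of, destroy it here so it is not leaked. */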
+ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ if (psContextExportInt)
+ {
+ DevmemIntUnexportCtx(psContextExportInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnexportCtxOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+ if ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnexportCtx: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnexportCtxOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto DevmemIntUnexportCtx_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+DevmemIntUnexportCtx_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_CTX * psContextInt = NULL;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+ psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntAcquireRemoteCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntAcquireRemoteCtxOUT->eError =
+ DevmemIntAcquireRemoteCtx(
+ psPMRInt,
+ &psContextInt,
+ &hPrivDataInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntAcquireRemoteCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntAcquireRemoteCtxOUT->hContext,
+ (void *) psContextInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+
+
+
+
+
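+ /* hPrivData is exposed as a sub-handle of hContext, so it is torn down
+  * together with the parent context handle; this is why the error path
+  * below only has to release hContext. */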
+ psDevmemIntAcquireRemoteCtxOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntAcquireRemoteCtxOUT->hPrivData,
+ (void *) hPrivDataInt,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psDevmemIntAcquireRemoteCtxOUT->hContext);
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntAcquireRemoteCtx_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntAcquireRemoteCtxOUT->hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntAcquireRemoteCtx: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psContextInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psContextInt)
+ {
+ DevmemIntCtxDestroy(psContextInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, PVRSRVBridgeDevmemIntExportCtx,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, PVRSRVBridgeDevmemIntUnexportCtx,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, PVRSRVBridgeDevmemIntAcquireRemoteCtx,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cmm functions from services
+ */
+PVRSRV_ERROR DeinitCMMBridge(void)
+{
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE: when it is defined,
+ * the dispatch table is not populated with this bridge's functions.
+ */
+#define InitCMMBridge() \
+ PVRSRV_OK
+
+#define DeinitCMMBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_CMM_BRIDGE */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for debugmisc
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for debugmisc
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEBUGMISC_BRIDGE_H
+#define COMMON_DEBUGMISC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST (PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+6)
+
+
+/*******************************************
+ DebugMiscSLCSetBypassState
+ *******************************************/
+
+/* Bridge in structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+ IMG_UINT32 ui32Flags;
+ IMG_BOOL bIsBypassed;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE;
+
+/* Bridge out structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE;
+
+
+/*******************************************
+ RGXDebugMiscSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG_TAG
+{
+ IMG_UINT32 ui32RGXFWLogType;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG;
+
+/* Bridge out structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG;
+
+
+/*******************************************
+ RGXDebugMiscDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+
+/*******************************************
+ PhysmemImportSecBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32Log2Align;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF;
+
+/* Bridge out structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_UINT64 ui64SecBufHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF;
+
+
+/*******************************************
+ RGXDebugMiscSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+ IMG_UINT32 ui32RGXHCSDeadline;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE;
+
+/* Bridge out structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE;
+
+
+/*******************************************
+ RGXDebugMiscSetOSidPriority
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY;
+
+/* Bridge out structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY;
+
+
+/*******************************************
+ RGXDebugMiscSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSNewState;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+
+#endif /* COMMON_DEBUGMISC_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for debugmisc
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for debugmisc
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "debugmisc_server.h"
+#include "pmr.h"
+#include "physmem_tdsecbuf.h"
+
+
+#include "common_debugmisc_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
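+/* Apart from PVRSRVBridgePhysmemImportSecBuf, which wraps the imported PMR
+ * in a new handle, the handlers in this file take no handles or array
+ * arguments and simply forward their unpacked inputs to the corresponding
+ * KM function, recording the result in eError.
+ */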
+static IMG_INT
+PVRSRVBridgeDebugMiscSLCSetBypassState(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateIN,
+ PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psDebugMiscSLCSetBypassStateOUT->eError =
+ PVRSRVDebugMiscSLCSetBypassStateKM(psConnection, OSGetDevData(psConnection),
+ psDebugMiscSLCSetBypassStateIN->ui32Flags,
+ psDebugMiscSLCSetBypassStateIN->bIsBypassed);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetFWLogOUT->eError =
+ PVRSRVRGXDebugMiscSetFWLogKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetFWLogIN->ui32RGXFWLogType);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXDebugMiscDumpFreelistPageListIN);
+
+
+
+
+
+ psRGXDebugMiscDumpFreelistPageListOUT->eError =
+ PVRSRVRGXDebugMiscDumpFreelistPageListKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSecBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR * psPMRPtrInt = NULL;
+
+
+
+
+
+
+
+
+ psPhysmemImportSecBufOUT->eError =
+ PhysmemImportSecBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportSecBufIN->uiSize,
+ psPhysmemImportSecBufIN->ui32Log2Align,
+ psPhysmemImportSecBufIN->uiFlags,
+ &psPMRPtrInt,
+ &psPhysmemImportSecBufOUT->ui64SecBufHandle);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportSecBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemImportSecBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPhysmemImportSecBufOUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportSecBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportSecBuf_exit:
+
+
+
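+ /* If anything failed after the import succeeded, no handle owns the PMR
+  * yet, so drop the reference taken by PhysmemImportSecBuf here. */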
+ if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetHCSDeadlineOUT->eError =
+ PVRSRVRGXDebugMiscSetHCSDeadlineKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetHCSDeadlineIN->ui32RGXHCSDeadline);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetOSidPriorityOUT->eError =
+ PVRSRVRGXDebugMiscSetOSidPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetOSidPriorityIN->ui32OSid,
+ psRGXDebugMiscSetOSidPriorityIN->ui32Priority);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetOSNewOnlineStateOUT->eError =
+ PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSid,
+ psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSNewState);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+
+/*
+ * Register all DEBUGMISC functions with services
+ */
+PVRSRV_ERROR InitDEBUGMISCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE, PVRSRVBridgeDebugMiscSLCSetBypassState,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG, PVRSRVBridgeRGXDebugMiscSetFWLog,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST, PVRSRVBridgeRGXDebugMiscDumpFreelistPageList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF, PVRSRVBridgePhysmemImportSecBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE, PVRSRVBridgeRGXDebugMiscSetHCSDeadline,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY, PVRSRVBridgeRGXDebugMiscSetOSidPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE, PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all debugmisc functions from services
+ */
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapNew(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapNew(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
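+/* Illustrative call sequence only; the bridge connection handle, PMR handle
+ * and the address/size/index values are assumed to come from the usual
+ * services allocation paths, which are outside this header:
+ *
+ *	IMG_UINT32 ui32AllocIndexOut;
+ *	PVRSRV_ERROR eErr;
+ *
+ *	eErr = BridgeDevicememHistoryMapNew(hBridge, hPMR, uiOffset, sDevVAddr,
+ *	                                    uiSize, "my-buffer", ui32Log2PageSize,
+ *	                                    ui32AllocationIndex,
+ *	                                    &ui32AllocIndexOut);
+ *	...
+ *	eErr = BridgeDevicememHistoryUnmapNew(hBridge, hPMR, uiOffset, sDevVAddr,
+ *	                                      uiSize, "my-buffer", ui32Log2PageSize,
+ *	                                      ui32AllocIndexOut, &ui32AllocIndexOut);
+ *
+ * The allocation index returned by the map call can presumably be passed back
+ * into the matching unmap call so the history does not have to search for the
+ * allocation again.
+ */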
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "mm_common.h"
+
+#include "devicemem_history_server.h"
+
+
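+/* In this direct-bridge build the caller and the server run in the same
+ * environment, so each wrapper below calls the KM implementation directly
+ * and the hBridge parameter is unused.
+ */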
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryMapKM(
+ sDevVAddr,
+ uiSize,
+ puiText);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryUnmapKM(
+ sDevVAddr,
+ uiSize,
+ puiText);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapNew(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistoryMapNewKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapNew(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistoryUnmapNewKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryMapVRangeKM(
+ sBaseDevVAddr,
+ ui32ui32StartPage,
+ ui32NumPages,
+ uiAllocSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryUnmapVRangeKM(
+ sBaseDevVAddr,
+ ui32ui32StartPage,
+ ui32NumPages,
+ uiAllocSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistorySparseChangeKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocPageCount,
+ pui32AllocPageIndices,
+ ui32FreePageCount,
+ pui32FreePageIndices,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "img_types.h"
+#include "mm_common.h"
+
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPNEW PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPNEW PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+5
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+6
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+6)
+
+
+/*******************************************
+ DevicememHistoryMap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+
+/*******************************************
+ DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+
+/*******************************************
+ DevicememHistoryMapNew
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapNew */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPNEW_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPNEW;
+
+/* Bridge out structure for DevicememHistoryMapNew */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPNEW_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPNEW;
+
+
+/*******************************************
+ DevicememHistoryUnmapNew
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapNew */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPNEW_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPNEW;
+
+/* Bridge out structure for DevicememHistoryUnmapNew */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPNEW_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPNEW;
+
+
+/*******************************************
+ DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_UINT32 ui32ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+
+/*******************************************
+ DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_UINT32 ui32ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+
+/*******************************************
+ DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 * pui32AllocPageIndices;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 * pui32FreePageIndices;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
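+
+/*
+ * Each entry point below follows the same pattern: any array or string
+ * arguments are copied from user space into a scratch buffer (reusing the
+ * spare space at the end of the bridge input buffer when it is big enough),
+ * any handles in the IN structure are looked up under the handle lock, the
+ * corresponding *KM function is called, and its result is written back to
+ * the OUT structure. The IMG_INT return value is always 0; errors are
+ * reported through the OUT structure's eError field.
+ */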
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
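+ /* The IN structure sits at the start of a bridge input buffer of
+ PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so any space left after it (rounded
+ up to an unsigned long boundary) can hold the copied arrays without a
+ separate heap allocation. */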
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMap_exit;
+ }
+ }
+
+
+ psDevicememHistoryMapOUT->eError =
+ DevicememHistoryMapKM(
+ psDevicememHistoryMapIN->sDevVAddr,
+ psDevicememHistoryMapIN->uiSize,
+ uiTextInt);
+
+
+
+
+DevicememHistoryMap_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmap_exit;
+ }
+ }
+
+
+ psDevicememHistoryUnmapOUT->eError =
+ DevicememHistoryUnmapKM(
+ psDevicememHistoryUnmapIN->sDevVAddr,
+ psDevicememHistoryUnmapIN->uiSize,
+ uiTextInt);
+
+
+
+
+DevicememHistoryUnmap_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapNew(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPNEW *psDevicememHistoryMapNewIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPNEW *psDevicememHistoryMapNewOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistoryMapNewIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapNewIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapNewIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapNewOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMapNew_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapNewIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryMapNewOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMapNew_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevicememHistoryMapNewOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistoryMapNewOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistoryMapNew_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistoryMapNewOUT->eError =
+ DevicememHistoryMapNewKM(
+ psPMRInt,
+ psDevicememHistoryMapNewIN->uiOffset,
+ psDevicememHistoryMapNewIN->sDevVAddr,
+ psDevicememHistoryMapNewIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryMapNewIN->ui32Log2PageSize,
+ psDevicememHistoryMapNewIN->ui32AllocationIndex,
+ &psDevicememHistoryMapNewOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMapNew_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapNew(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPNEW *psDevicememHistoryUnmapNewIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPNEW *psDevicememHistoryUnmapNewOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistoryUnmapNewIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapNewIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapNewIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapNewOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmapNew_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapNewIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryUnmapNewOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmapNew_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevicememHistoryUnmapNewOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistoryUnmapNewOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistoryUnmapNew_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistoryUnmapNewOUT->eError =
+ DevicememHistoryUnmapNewKM(
+ psPMRInt,
+ psDevicememHistoryUnmapNewIN->uiOffset,
+ psDevicememHistoryUnmapNewIN->sDevVAddr,
+ psDevicememHistoryUnmapNewIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryUnmapNewIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapNewIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapNewOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmapNew_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMapVRange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapVRangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMapVRange_exit;
+ }
+ }
+
+
+ psDevicememHistoryMapVRangeOUT->eError =
+ DevicememHistoryMapVRangeKM(
+ psDevicememHistoryMapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryMapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryMapVRangeIN->ui32NumPages,
+ psDevicememHistoryMapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryMapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryMapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMapVRange_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapVRangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ }
+
+
+ psDevicememHistoryUnmapVRangeOUT->eError =
+ DevicememHistoryUnmapVRangeKM(
+ psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryUnmapVRangeIN->ui32NumPages,
+ psDevicememHistoryUnmapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmapVRange_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+ (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+ (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+ 0;
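+ /* The scratch buffer packs the text string and both page-index arrays
+ back to back; ui32NextOffset tracks the running offset as each array
+ is carved out and copied from user space below. */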
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistorySparseChangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistorySparseChangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AllocPageIndicesInt, psDevicememHistorySparseChangeIN->pui32AllocPageIndices, psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FreePageIndicesInt, psDevicememHistorySparseChangeIN->pui32FreePageIndices, psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevicememHistorySparseChangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistorySparseChangeOUT->eError =
+ DevicememHistorySparseChangeKM(
+ psPMRInt,
+ psDevicememHistorySparseChangeIN->uiOffset,
+ psDevicememHistorySparseChangeIN->sDevVAddr,
+ psDevicememHistorySparseChangeIN->uiSize,
+ uiTextInt,
+ psDevicememHistorySparseChangeIN->ui32Log2PageSize,
+ psDevicememHistorySparseChangeIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32AllocationIndex,
+ &psDevicememHistorySparseChangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistorySparseChange_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
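+/* A single passive lock is shared by all DEVICEMEMHISTORY bridge entries: it
+ * is created in InitDEVICEMEMHISTORYBridge(), destroyed in
+ * DeinitDEVICEMEMHISTORYBridge(), and registered (together with bUseLock)
+ * against every dispatch table entry below.
+ */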
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, PVRSRVBridgeDevicememHistoryMap,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, PVRSRVBridgeDevicememHistoryUnmap,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPNEW, PVRSRVBridgeDevicememHistoryMapNew,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPNEW, PVRSRVBridgeDevicememHistoryUnmapNew,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, PVRSRVBridgeDevicememHistoryMapVRange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, PVRSRVBridgeDevicememHistoryUnmapVRange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, PVRSRVBridgeDevicememHistorySparseChange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all devicememhistory functions with services
+ */
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), "OSLockDestroy");
+ return PVRSRV_OK;
+}
+#else /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+/* This bridge is conditional on SUPPORT_DEVICEMEMHISTORY_BRIDGE - when it is
+ * not defined, the dispatch table is not populated with its functions.
+ */
+#define InitDEVICEMEMHISTORYBridge() \
+ PVRSRV_OK
+
+#define DeinitDEVICEMEMHISTORYBridge() \
+ PVRSRV_OK
+
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
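+
+/* These command IDs are the per-bridge offsets used when the DMABUF entry
+ * points are registered with SetDispatchTableEntry() in the server bridge. */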
+
+
+/*******************************************
+ PhysmemImportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+ IMG_INT ifd;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+
+/*******************************************
+ PhysmemExportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+ IMG_INT iFd;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+
+/*******************************************
+ PhysmemImportSparseDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_INT ifd;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR * psPMRPtrInt = NULL;
+
+
+
+
+
+
+
+
+ psPhysmemImportDmaBufOUT->eError =
+ PhysmemImportDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportDmaBufIN->ifd,
+ psPhysmemImportDmaBufIN->uiFlags,
+ &psPMRPtrInt,
+ &psPhysmemImportDmaBufOUT->uiSize,
+ &psPhysmemImportDmaBufOUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
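+ /* Wrap the new PMR in a handle, registering PMRUnrefPMR as the release
+ function so the PMR reference is dropped when the handle is destroyed.
+ If anything fails from here on, the exit path below unrefs the PMR
+ directly. */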
+ psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psPhysmemImportDmaBufOUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportDmaBuf_exit:
+
+
+
+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPhysmemExportDmaBufOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemExportDmaBuf_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPhysmemExportDmaBufOUT->eError =
+ PhysmemExportDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPMRInt,
+ &psPhysmemExportDmaBufOUT->iFd);
+
+
+
+
+PhysmemExportDmaBuf_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+ 0;
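+ /* The only array argument here is the mapping table of ui32NumPhysChunks
+ entries, copied from user space below (presumably relating each physical
+ chunk to its position in the sparse virtual allocation). */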
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemImportSparseDmaBufIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+ }
+
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemImportSparseDmaBufIN->pui32MappingTable, psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+
+
+ psPhysmemImportSparseDmaBufOUT->eError =
+ PhysmemImportSparseDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportSparseDmaBufIN->ifd,
+ psPhysmemImportSparseDmaBufIN->uiFlags,
+ psPhysmemImportSparseDmaBufIN->uiChunkSize,
+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks,
+ psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ &psPMRPtrInt,
+ &psPhysmemImportSparseDmaBufOUT->uiSize,
+ &psPhysmemImportSparseDmaBufOUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psPhysmemImportSparseDmaBufOUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportSparseDmaBuf_exit:
+
+
+
+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
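+/* Note that, unlike the DEVICEMEMHISTORY bridge above, no bridge-specific
+ * lock is created here: every DMABUF entry is registered with a NULL lock.
+ */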
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, PVRSRVBridgePhysmemImportDmaBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, PVRSRVBridgePhysmemExportDmaBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, PVRSRVBridgePhysmemImportSparseDmaBuf,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dmabuf functions with services
+ */
+PVRSRV_ERROR DeinitDMABUFBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
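+/* One client entry point per htbuffer bridge call; each takes the bridge
+ * handle plus the same arguments as the corresponding HTB*KM server call.
+ */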
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32BufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 *pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode,
+ IMG_UINT32 ui32OpMode);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32TimeStamp,
+ IMG_UINT32 ui32SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 *pui32Args);
+
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+
+#include "htbserver.h"
+
+
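+/*
+ * "Direct" client bridge stubs: instead of packing the arguments into the
+ * bridge IN/OUT structures, each wrapper forwards them straight to the
+ * corresponding HTB*KM server function, so the hBridge handle is unused.
+ */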
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32BufferSize)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ HTBConfigureKM(
+ ui32NameSize,
+ puiName,
+ ui32BufferSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 *pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode,
+ IMG_UINT32 ui32OpMode)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ HTBControlKM(
+ ui32NumGroups,
+ pui32GroupEnable,
+ ui32LogLevel,
+ ui32EnablePID,
+ ui32LogMode,
+ ui32OpMode);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32TimeStamp,
+ IMG_UINT32 ui32SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 *pui32Args)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ HTBLogKM(
+ ui32PID,
+ ui32TimeStamp,
+ ui32SF,
+ ui32NumArgs,
+ pui32Args);
+
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2)
+
+
+/*******************************************
+ HTBConfigure
+ *******************************************/
+
+/* Bridge in structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONFIGURE_TAG
+{
+ IMG_UINT32 ui32NameSize;
+ const IMG_CHAR * puiName;
+ IMG_UINT32 ui32BufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONFIGURE;
+
+/* Bridge out structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONFIGURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONFIGURE;
+
+
+/*******************************************
+ HTBControl
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+ IMG_UINT32 ui32NumGroups;
+ IMG_UINT32 * pui32GroupEnable;
+ IMG_UINT32 ui32LogLevel;
+ IMG_UINT32 ui32EnablePID;
+ IMG_UINT32 ui32LogMode;
+ IMG_UINT32 ui32OpMode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL;
+
+
+/*******************************************
+ HTBLog
+ *******************************************/
+
+/* Bridge in structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
+{
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32TimeStamp;
+ IMG_UINT32 ui32SF;
+ IMG_UINT32 ui32NumArgs;
+ IMG_UINT32 * pui32Args;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBLOG;
+
+/* Bridge out structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBLOG;
+
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
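+/*
+ * Each entry point below follows the same generated pattern: compute the
+ * total size of the caller-supplied arrays, stage them either in the unused
+ * tail of the fixed-size bridge input buffer when it is large enough (not on
+ * INTEGRITY_OS) or in a temporary allocation, copy the data from user space,
+ * call the corresponding *KM function and return its status in the OUT
+ * structure.
+ */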
+
+static IMG_INT
+PVRSRVBridgeHTBConfigure(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBCONFIGURE *psHTBConfigureIN,
+ PVRSRV_BRIDGE_OUT_HTBCONFIGURE *psHTBConfigureOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBConfigureIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBConfigureIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHTBConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBConfigure_exit;
+ }
+ }
+ }
+
+ if (psHTBConfigureIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, psHTBConfigureIN->puiName, psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psHTBConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBConfigure_exit;
+ }
+ }
+
+
+ psHTBConfigureOUT->eError =
+ HTBConfigureKM(
+ psHTBConfigureIN->ui32NameSize,
+ uiNameInt,
+ psHTBConfigureIN->ui32BufferSize);
+
+
+
+
+HTBConfigure_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if (pArrayArgsBuffer)
+#else
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN,
+ PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBControlIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBControl_exit;
+ }
+ }
+ }
+
+ if (psHTBControlIN->ui32NumGroups != 0)
+ {
+ ui32GroupEnableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32GroupEnableInt, psHTBControlIN->pui32GroupEnable, psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBControl_exit;
+ }
+ }
+
+
+ psHTBControlOUT->eError =
+ HTBControlKM(
+ psHTBControlIN->ui32NumGroups,
+ ui32GroupEnableInt,
+ psHTBControlIN->ui32LogLevel,
+ psHTBControlIN->ui32EnablePID,
+ psHTBControlIN->ui32LogMode,
+ psHTBControlIN->ui32OpMode);
+
+
+
+
+HTBControl_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if (pArrayArgsBuffer)
+#else
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN,
+ PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32ArgsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBLogIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBLog_exit;
+ }
+ }
+ }
+
+ if (psHTBLogIN->ui32NumArgs != 0)
+ {
+ ui32ArgsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ArgsInt, psHTBLogIN->pui32Args, psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBLog_exit;
+ }
+ }
+
+
+ psHTBLogOUT->eError =
+ HTBLogKM(
+ psHTBLogIN->ui32PID,
+ psHTBLogIN->ui32TimeStamp,
+ psHTBLogIN->ui32SF,
+ psHTBLogIN->ui32NumArgs,
+ ui32ArgsInt);
+
+
+
+
+HTBLog_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if (pArrayArgsBuffer)
+#else
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pHTBUFFERBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
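+/*
+ * Each SetDispatchTableEntry call below associates one
+ * (PVRSRV_BRIDGE_HTBUFFER, command) pair with its handler; all three entries
+ * share pHTBUFFERBridgeLock and pass bUseLock so the common dispatcher can
+ * serialise calls into this module.
+ */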
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE, PVRSRVBridgeHTBConfigure,
+ pHTBUFFERBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, PVRSRVBridgeHTBControl,
+ pHTBUFFERBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog,
+ pHTBUFFERBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all HTBUFFER functions from services
+ */
+PVRSRV_ERROR DeinitHTBUFFERBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), "OSLockDestroy");
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE: when that macro is
+ * defined, the dispatch table is not populated with its functions.
+ */
+#define InitHTBUFFERBridge() \
+ PVRSRV_OK
+
+#define DeinitHTBUFFERBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
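+/*
+ * One wrapper is declared below for each PVRSRV_BRIDGE_MM_* command defined
+ * in common_mm_bridge.h. With PVR_INDIRECT_BRIDGE_CLIENTS these are
+ * presumably backed by the ioctl-marshalling client pulled in through
+ * pvr_bridge_client.h; otherwise the direct bridge implementation calls the
+ * server functions immediately.
+ */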
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE *phPMRExport,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT32 *pui32Log2Contig,
+ IMG_UINT64 *pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 *pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer,
+ IMG_HANDLE *phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig,
+ IMG_HANDLE *phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *psAlign);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE *phDevMemServerContext,
+ IMG_HANDLE *phPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE *phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE *phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE *phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT64 ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 *pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR *puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR *puiHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T *puiHeapLength,
+ IMG_UINT32 *pui32Log2DataPageSizeOut,
+ IMG_UINT32 *pui32Log2ImportAlignmentOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister);
+
+
+#endif /* CLIENT_MM_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
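+/*
+ * Direct client bridge: each wrapper below casts the opaque handles straight
+ * to the internal server types (PMR, DEVMEMINT_*, ...) and calls the server
+ * function with no marshalling; where a device is required, hBridge itself
+ * is reused as the PVRSRV_DEVICE_NODE pointer.
+ */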
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE *phPMRExport,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT32 *pui32Log2Contig,
+ IMG_UINT64 *pui64Password)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PMR_EXPORT * psPMRExportInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRExportPMR(
+ psPMRInt,
+ &psPMRExportInt,
+ pui64Size,
+ pui32Log2Contig,
+ pui64Password);
+
+ *phPMRExport = psPMRExportInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT * psPMRExportInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError =
+ PMRUnexportPMR(
+ psPMRExportInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 *pui64UID)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRGetUID(
+ psPMRInt,
+ pui64UID);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer,
+ IMG_HANDLE *phExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR * psBufferInt;
+ PMR * psExtMemInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psBufferInt = (PMR *) hBuffer;
+
+ eError =
+ PMRMakeLocalImportHandle(
+ psBufferInt,
+ &psExtMemInt);
+
+ *phExtMem = psExtMemInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR * psExtMemInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtMemInt = (PMR *) hExtMem;
+
+ eError =
+ PMRUnmakeLocalImportHandle(
+ psExtMemInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig,
+ IMG_HANDLE *phPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT * psPMRExportInt;
+ PMR * psPMRInt;
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError =
+ PMRImportPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psPMRExportInt,
+ ui64uiPassword,
+ ui64uiSize,
+ ui32uiLog2Contig,
+ &psPMRInt);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *psAlign)
+{
+ PVRSRV_ERROR eError;
+ PMR * psExtHandleInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtHandleInt = (PMR *) hExtHandle;
+
+ eError =
+ PMRLocalImportPMR(
+ psExtHandleInt,
+ &psPMRInt,
+ puiSize,
+ psAlign);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRUnrefPMR(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRUnrefUnlockPMR(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_HANDLE *phPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRPtrInt;
+
+
+ eError =
+ PhysmemNewRamBackedPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32Log2PageSize,
+ uiFlags,
+ ui32AnnotationLength,
+ puiAnnotation,
+ &psPMRPtrInt);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_HANDLE *phPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRPtrInt;
+
+
+ eError =
+ PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32Log2PageSize,
+ uiFlags,
+ ui32AnnotationLength,
+ puiAnnotation,
+ &psPMRPtrInt);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntPin(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntUnpin(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntPinValidate(
+ psMappingInt,
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntUnpinInvalidate(
+ psMappingInt,
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE *phDevMemServerContext,
+ IMG_HANDLE *phPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevMemServerContextInt;
+ IMG_HANDLE hPrivDataInt;
+
+
+ eError =
+ DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ bbKernelMemoryCtx,
+ &psDevMemServerContextInt,
+ &hPrivDataInt,
+ pui32CPUCacheLineSize);
+
+ *phDevMemServerContext = psDevMemServerContextInt;
+ *phPrivData = hPrivDataInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemServerContextInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError =
+ DevmemIntCtxDestroy(
+ psDevmemServerContextInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE *phDevmemHeapPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+ DEVMEMINT_HEAP * psDevmemHeapPtrInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntHeapCreate(
+ psDevmemCtxInt,
+ sHeapBaseAddr,
+ uiHeapLength,
+ ui32Log2DataPageSize,
+ &psDevmemHeapPtrInt);
+
+ *phDevmemHeapPtr = psDevmemHeapPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemHeap)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemHeapInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+ eError =
+ DevmemIntHeapDestroy(
+ psDevmemHeapInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE *phMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PMR * psPMRInt;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPMR(
+ psDevmemServerHeapInt,
+ psReservationInt,
+ psPMRInt,
+ uiMapFlags,
+ &psMappingInt);
+
+ *phMapping = psMappingInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+ eError =
+ DevmemIntUnmapPMR(
+ psMappingInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE *phReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+ eError =
+ DevmemIntReserveRange(
+ psDevmemServerHeapInt,
+ sAddress,
+ uiLength,
+ &psReservationInt);
+
+ *phReservation = psReservationInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError =
+ DevmemIntUnreserveRange(
+ psReservationInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT64 ui64CPUVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psSrvDevMemHeapInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntChangeSparse(
+ psSrvDevMemHeapInt,
+ psPMRInt,
+ ui32AllocPageCount,
+ pui32AllocPageIndices,
+ ui32FreePageCount,
+ pui32FreePageIndices,
+ ui32SparseFlags,
+ uiFlags,
+ sDevVAddr,
+ ui64CPUVAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPages(
+ psReservationInt,
+ psPMRInt,
+ ui32PageCount,
+ ui32PhysicalPgOffset,
+ uiFlags,
+ sDevVAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError =
+ DevmemIntUnmapPages(
+ psReservationInt,
+ sDevVAddr,
+ ui32PageCount);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psDevmemCtxInt,
+ sAddress);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32NumHeapConfigs)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ pui32NumHeapConfigs);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 *pui32NumHeaps)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ pui32NumHeaps);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR *puiHeapConfigName)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ ui32HeapConfigNameBufSz,
+ puiHeapConfigName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR *puiHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T *puiHeapLength,
+ IMG_UINT32 *pui32Log2DataPageSizeOut,
+ IMG_UINT32 *pui32Log2ImportAlignmentOut)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ ui32HeapIndex,
+ ui32HeapNameBufSz,
+ puiHeapNameOut,
+ psDevVAddrBase,
+ puiHeapLength,
+ pui32Log2DataPageSizeOut,
+ pui32Log2ImportAlignmentOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntRegisterPFNotifyKM(
+ psDevmemCtxInt,
+ ui32PID,
+ bRegister);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+31)
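+
+/*
+ * Each command above is an offset from PVRSRV_BRIDGE_MM_CMD_FIRST and has a
+ * matching pair of packed IN/OUT structures below; the OUT structure always
+ * carries the PVRSRV_ERROR status for the call.
+ */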
+
+
+/*******************************************
+ PMRExportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Log2Contig;
+ IMG_UINT64 ui64Password;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+
+/*******************************************
+ PMRUnexportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+
+/*******************************************
+ PMRGetUID
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+ IMG_UINT64 ui64UID;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+
+/*******************************************
+ PMRMakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+ PMRUnmakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+ PMRImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+ IMG_UINT64 ui64uiPassword;
+ IMG_UINT64 ui64uiSize;
+ IMG_UINT32 ui32uiLog2Contig;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+
+/*******************************************
+ PMRLocalImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_HANDLE hExtHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+
+/*******************************************
+ PMRUnrefPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+
+/*******************************************
+ PMRUnrefUnlockPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+
+/*******************************************
+ PhysmemNewRamBackedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+ IMG_UINT32 ui32Log2PageSize;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_UINT32 ui32AnnotationLength;
+ const IMG_CHAR * puiAnnotation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+
+/*******************************************
+ PhysmemNewRamBackedLockedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+ IMG_UINT32 ui32Log2PageSize;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_UINT32 ui32AnnotationLength;
+ const IMG_CHAR * puiAnnotation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+
+/*******************************************
+ DevmemIntPin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
+
+/* Bridge out structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
+
+
+/*******************************************
+ DevmemIntUnpin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
+
+/* Bridge out structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
+
+
+/*******************************************
+ DevmemIntPinValidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
+{
+ IMG_HANDLE hMapping;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
+
+/* Bridge out structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
+
+
+/*******************************************
+ DevmemIntUnpinInvalidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
+{
+ IMG_HANDLE hMapping;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
+
+/* Bridge out structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
+
+
+/*******************************************
+ DevmemIntCtxCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_BOOL bbKernelMemoryCtx;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_HANDLE hDevMemServerContext;
+ IMG_HANDLE hPrivData;
+ IMG_UINT32 ui32CPUCacheLineSize;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+
+/*******************************************
+ DevmemIntCtxDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+ IMG_HANDLE hDevmemServerContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+
+/*******************************************
+ DevmemIntHeapCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_DEV_VIRTADDR sHeapBaseAddr;
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_UINT32 ui32Log2DataPageSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_HANDLE hDevmemHeapPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+
+/*******************************************
+ DevmemIntHeapDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+ IMG_HANDLE hDevmemHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+
+/*******************************************
+ DevmemIntMapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hDevmemServerHeap;
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+
+/*******************************************
+ DevmemIntUnmapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+
+/*******************************************
+ DevmemIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hDevmemServerHeap;
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiLength;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+
+/*******************************************
+ DevmemIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+
+/*******************************************
+ ChangeSparseMem
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+ IMG_HANDLE hSrvDevMemHeap;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 * pui32AllocPageIndices;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 * pui32FreePageIndices;
+ IMG_UINT32 ui32SparseFlags;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT64 ui64CPUVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+
+/*******************************************
+ DevmemIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32PhysicalPgOffset;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEV_VIRTADDR sDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+
+/*******************************************
+ DevmemIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT32 ui32PageCount;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+
+/*******************************************
+ DevmemIsVDevAddrValid
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_DEV_VIRTADDR sAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+
+/*******************************************
+ HeapCfgHeapConfigCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
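+ /* HeapCfgHeapConfigCount takes no inputs; the placeholder keeps the packed struct non-empty */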
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+ IMG_UINT32 ui32NumHeapConfigs;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+
+/*******************************************
+ HeapCfgHeapCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+ IMG_UINT32 ui32NumHeaps;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+
+/*******************************************
+ HeapCfgHeapConfigName
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapConfigNameBufSz;
+ /* Output pointer puiHeapConfigName is also an implied input */
+ IMG_CHAR * puiHeapConfigName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
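+ /* Mirrors the caller-supplied buffer from the IN structure; the server is
+  * expected to write at most ui32HeapConfigNameBufSz characters into it
+  * (assumed, per the implied-input note above). */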
+ IMG_CHAR * puiHeapConfigName;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+
+/*******************************************
+ HeapCfgHeapDetails
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapIndex;
+ IMG_UINT32 ui32HeapNameBufSz;
+ /* Output pointer puiHeapNameOut is also an implied input */
+ IMG_CHAR * puiHeapNameOut;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_CHAR * puiHeapNameOut;
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_UINT32 ui32Log2DataPageSizeOut;
+ IMG_UINT32 ui32Log2ImportAlignmentOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+
+/*******************************************
+ DevmemIntRegisterPFNotifyKM
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32PID;
+ IMG_BOOL bRegister;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+
+#endif /* COMMON_MM_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
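+/* No-op release function installed on the cross-process (kernel handle base)
+ * PMR export handle; the real teardown is performed via the per-connection
+ * handle, which is registered with PMRUnexportPMR in PVRSRVBridgePMRExportPMR
+ * below.
+ */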
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
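+/* Each entry point looks up its input handles under the global handle lock,
+ * drops the lock before calling the underlying server function, and then
+ * reacquires it for handle creation and cleanup.
+ */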
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+ PMR * psPMRInt = NULL;
+ PMR_EXPORT * psPMRExportInt = NULL;
+ IMG_HANDLE hPMRExportInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRExportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRExportPMROUT->eError =
+ PMRExportPMR(
+ psPMRInt,
+ &psPMRExportInt,
+ &psPMRExportPMROUT->ui64Size,
+ &psPMRExportPMROUT->ui32Log2Contig,
+ &psPMRExportPMROUT->ui64Password);
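+ /* The size, log2-contiguity and password returned here are what an
+  * importing process later passes to PMRImportPMR. */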
+ /* Exit early if bridged call fails */
+ if(psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRExportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+ /*
+ * For cases where we need a cross process handle we actually allocate two.
+ *
+ * The first one is a connection specific handle and it gets given the real
+ * release function. This handle does *NOT* get returned to the caller. Its
+ * purpose is to release any leaked resources when we either have a bad or
+ * abnormally terminated client. If we didn't do this then the resource
+ * wouldn't be freed until driver unload. If the resource is freed normally,
+ * this handle can be looked up via the cross process handle and then
+ * released accordingly.
+ *
+ * The second one is a cross process handle and it gets given a noop release
+ * function. This handle does get returned to the caller.
+ */
+
+
+
+
+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+
+ &hPMRExportInt,
+ (void *) psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnexportPMR);
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+
+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+ &psPMRExportPMROUT->hPMRExport,
+ (void *) psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE)&ReleasePMRExport);
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRExportPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psPMRExportPMROUT->hPMRExport)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+ (IMG_HANDLE) psPMRExportPMROUT->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRExportPMR: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ }
+
+ if (hPMRExportInt)
+ {
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRExportPMR: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psPMRExportInt = NULL;
+ }
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psPMRExportInt)
+ {
+ PMRUnexportPMR(psPMRExportInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR_EXPORT * psPMRExportInt = NULL;
+ IMG_HANDLE hPMRExportInt = NULL;
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+ (void **) &psPMRExportInt,
+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ IMG_FALSE);
+ if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+ /*
+ * Find the connection specific handle that represents the same data
+ * as the cross process handle as releasing it will actually call the
+ * data's real release function (see the function where the cross
+ * process handle is allocated for more details).
+ */
+ psPMRUnexportPMROUT->eError =
+ PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &hPMRExportInt,
+ psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+ (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+
+
+
+
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto PMRUnexportPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+PMRUnexportPMR_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN,
+ PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRGetUIDOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRGetUIDOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRGetUID_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRGetUIDOUT->eError =
+ PMRGetUID(
+ psPMRInt,
+ &psPMRGetUIDOUT->ui64UID);
+
+
+
+
+PMRGetUID_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN,
+ PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+ PMR * psBufferInt = NULL;
+ PMR * psExtMemInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRMakeLocalImportHandleOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psBufferInt,
+ hBuffer,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ IMG_TRUE);
+ if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRMakeLocalImportHandle_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRMakeLocalImportHandleOUT->eError =
+ PMRMakeLocalImportHandle(
+ psBufferInt,
+ &psExtMemInt);
+ /* Exit early if bridged call fails */
+ if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ goto PMRMakeLocalImportHandle_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPMRMakeLocalImportHandleOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPMRMakeLocalImportHandleOUT->hExtMem,
+ (void *) psExtMemInt,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnmakeLocalImportHandle);
+ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRMakeLocalImportHandle_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRMakeLocalImportHandle_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hBuffer,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ if (psExtMemInt)
+ {
+ PMRUnmakeLocalImportHandle(psExtMemInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN,
+ PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnmakeLocalImportHandleOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+ if ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) &&
+ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnmakeLocalImportHandle: %s",
+ PVRSRVGetErrorStringKM(psPMRUnmakeLocalImportHandleOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto PMRUnmakeLocalImportHandle_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+PMRUnmakeLocalImportHandle_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+ PMR_EXPORT * psPMRExportInt = NULL;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRImportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+ (void **) &psPMRExportInt,
+ hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ IMG_TRUE);
+ if(psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRImportPMR_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRImportPMROUT->eError =
+ PMRImportPMR(psConnection, OSGetDevData(psConnection),
+ psPMRExportInt,
+ psPMRImportPMRIN->ui64uiPassword,
+ psPMRImportPMRIN->ui64uiSize,
+ psPMRImportPMRIN->ui32uiLog2Contig,
+ &psPMRInt);
+ /* Exit early if bridged call fails */
+ if(psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRImportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
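+ /* Register the new PMR handle with PMRUnrefPMR as its release function so
+  * the reference is dropped automatically if the client exits without
+  * freeing it explicitly. */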
+ psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPMRImportPMROUT->hPMR,
+ (void *) psPMRInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRImportPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRImportPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRExportInt)
+ {
+ PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+ hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PMRUnrefPMR(psPMRInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+ PMR * psExtHandleInt = NULL;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRLocalImportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psExtHandleInt,
+ hExtHandle,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ IMG_TRUE);
+ if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRLocalImportPMR_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRLocalImportPMROUT->eError =
+ PMRLocalImportPMR(
+ psExtHandleInt,
+ &psPMRInt,
+ &psPMRLocalImportPMROUT->uiSize,
+ &psPMRLocalImportPMROUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRLocalImportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPMRLocalImportPMROUT->hPMR,
+ (void *) psPMRInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRLocalImportPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRLocalImportPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psExtHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hExtHandle,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PMRUnrefPMR(psPMRInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnrefPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if ((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnrefPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnrefPMROUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto PMRUnrefPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+PMRUnrefPMR_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnrefUnlockPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnrefUnlockPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnrefUnlockPMROUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto PMRUnrefUnlockPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+PMRUnrefUnlockPMR_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiAnnotationInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+ (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
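+ /* The IN structure occupies the start of the bridge input message buffer,
+  * so the space between its (word-aligned) end and PVRSRV_MAX_BRIDGE_IN_SIZE
+  * can serve as scratch for the array copies, avoiding a heap allocation on
+  * the common path. */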
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedPMRIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+ }
+
+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemNewRamBackedPMRIN->pui32MappingTable, psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+ {
+ uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiAnnotationInt, psPhysmemNewRamBackedPMRIN->puiAnnotation, psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+
+
+ psPhysmemNewRamBackedPMROUT->eError =
+ PhysmemNewRamBackedPMR(psConnection, OSGetDevData(psConnection),
+ psPhysmemNewRamBackedPMRIN->uiSize,
+ psPhysmemNewRamBackedPMRIN->uiChunkSize,
+ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+ psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+ psPhysmemNewRamBackedPMRIN->uiFlags,
+ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength,
+ uiAnnotationInt,
+ &psPMRPtrInt);
+ /* Exit early if bridged call fails */
+ if(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPhysmemNewRamBackedPMROUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemNewRamBackedPMR_exit:
+
+
+
+ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
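+ /* Only free the scratch buffer if it was heap-allocated; when the bridge
+  * input buffer was reused there is nothing to free. */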
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiAnnotationInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) +
+ (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedLockedPMRIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+ }
+
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
+ {
+ uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiAnnotationInt, psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+
+
+ psPhysmemNewRamBackedLockedPMROUT->eError =
+ PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevData(psConnection),
+ psPhysmemNewRamBackedLockedPMRIN->uiSize,
+ psPhysmemNewRamBackedLockedPMRIN->uiChunkSize,
+ psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks,
+ psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize,
+ psPhysmemNewRamBackedLockedPMRIN->uiFlags,
+ psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength,
+ uiAnnotationInt,
+ &psPMRPtrInt);
+ /* Exit early if bridged call fails */
+ if(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefUnlockPMR);
+ if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemNewRamBackedLockedPMR_exit:
+
+
+
+ if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefUnlockPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntPinOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntPinOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPin_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntPinOUT->eError =
+ DevmemIntPin(
+ psPMRInt);
+
+
+
+
+DevmemIntPin_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntUnpinOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntUnpinOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpin_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnpinOUT->eError =
+ DevmemIntUnpin(
+ psPMRInt);
+
+
+
+
+DevmemIntUnpin_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntPinValidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psMappingInt,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ IMG_TRUE);
+ if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPinValidate_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntPinValidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPinValidate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntPinValidateOUT->eError =
+ DevmemIntPinValidate(
+ psMappingInt,
+ psPMRInt);
+
+
+
+
+DevmemIntPinValidate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psMappingInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntUnpinInvalidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psMappingInt,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ IMG_TRUE);
+ if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpinInvalidate_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntUnpinInvalidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpinInvalidate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnpinInvalidateOUT->eError =
+ DevmemIntUnpinInvalidate(
+ psMappingInt,
+ psPMRInt);
+
+
+
+
+DevmemIntUnpinInvalidate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psMappingInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ DEVMEMINT_CTX * psDevMemServerContextInt = NULL;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+ psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+
+
+ psDevmemIntCtxCreateOUT->eError =
+ DevmemIntCtxCreate(psConnection, OSGetDevData(psConnection),
+ psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+ &psDevMemServerContextInt,
+ &hPrivDataInt,
+ &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntCtxCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntCtxCreateOUT->hDevMemServerContext,
+ (void *) psDevMemServerContextInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntCtxCreate_exit;
+ }
+
+
+
+
+
+
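+ /* hPrivData is allocated as a sub-handle of hDevMemServerContext and is
+  * therefore torn down along with the parent context handle. */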
+ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntCtxCreateOUT->hPrivData,
+ (void *) hPrivDataInt,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,psDevmemIntCtxCreateOUT->hDevMemServerContext);
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntCtxCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntCtxCreate_exit:
+
+
+
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntCtxCreateOUT->hDevMemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntCtxCreate: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psDevMemServerContextInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psDevMemServerContextInt)
+ {
+ DevmemIntCtxDestroy(psDevMemServerContextInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntCtxDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntCtxDestroyIN->hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntCtxDestroy: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntCtxDestroyOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto DevmemIntCtxDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+DevmemIntCtxDestroy_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+ DEVMEMINT_HEAP * psDevmemHeapPtrInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntHeapCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntHeapCreate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntHeapCreateOUT->eError =
+ DevmemIntHeapCreate(
+ psDevmemCtxInt,
+ psDevmemIntHeapCreateIN->sHeapBaseAddr,
+ psDevmemIntHeapCreateIN->uiHeapLength,
+ psDevmemIntHeapCreateIN->ui32Log2DataPageSize,
+ &psDevmemHeapPtrInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntHeapCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntHeapCreateOUT->hDevmemHeapPtr,
+ (void *) psDevmemHeapPtrInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&DevmemIntHeapDestroy);
+ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntHeapCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntHeapCreate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ if (psDevmemHeapPtrInt)
+ {
+ DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntHeapDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ if ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntHeapDestroy: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntHeapDestroyOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto DevmemIntHeapDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+DevmemIntHeapDestroy_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+ IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntMapPMROUT->eError =
+ DevmemIntMapPMR(
+ psDevmemServerHeapInt,
+ psReservationInt,
+ psPMRInt,
+ psDevmemIntMapPMRIN->uiMapFlags,
+ &psMappingInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntMapPMROUT->hMapping,
+ (void *) psMappingInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&DevmemIntUnmapPMR);
+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntMapPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemServerHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ if (psMappingInt)
+ {
+ DevmemIntUnmapPMR(psMappingInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnmapPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+ if ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnmapPMR: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnmapPMROUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto DevmemIntUnmapPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+DevmemIntUnmapPMR_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntReserveRangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntReserveRange_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntReserveRangeOUT->eError =
+ DevmemIntReserveRange(
+ psDevmemServerHeapInt,
+ psDevmemIntReserveRangeIN->sAddress,
+ psDevmemIntReserveRangeIN->uiLength,
+ &psReservationInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntReserveRangeOUT->hReservation,
+ (void *) psReservationInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&DevmemIntUnreserveRange);
+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntReserveRange_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemServerHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ if (psReservationInt)
+ {
+ DevmemIntUnreserveRange(psReservationInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnreserveRangeOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnreserveRangeIN->hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ if ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnreserveRange: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnreserveRangeOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto DevmemIntUnreserveRange_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+DevmemIntUnreserveRange_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN,
+ PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+ DEVMEMINT_HEAP * psSrvDevMemHeapInt = NULL;
+ IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+ (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psChangeSparseMemIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ChangeSparseMem_exit;
+ }
+ }
+ }
+
+ if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AllocPageIndicesInt, psChangeSparseMemIN->pui32AllocPageIndices, psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+ if (psChangeSparseMemIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FreePageIndicesInt, psChangeSparseMemIN->pui32FreePageIndices, psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSrvDevMemHeapInt,
+ hSrvDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ChangeSparseMem_exit;
+ }
+ }
+
+ {
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ChangeSparseMem_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psChangeSparseMemOUT->eError =
+ DevmemIntChangeSparse(
+ psSrvDevMemHeapInt,
+ psPMRInt,
+ psChangeSparseMemIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psChangeSparseMemIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psChangeSparseMemIN->ui32SparseFlags,
+ psChangeSparseMemIN->uiFlags,
+ psChangeSparseMemIN->sDevVAddr,
+ psChangeSparseMemIN->ui64CPUVAddr);
+
+ChangeSparseMem_exit:
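+ /* Common exit path: drop any references taken by the handle lookups
+  * above, whether or not DevmemIntChangeSparse() was reached.
+  */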
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSrvDevMemHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSrvDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ }
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPages_exit;
+ }
+ }
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPages_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntMapPagesOUT->eError =
+ DevmemIntMapPages(
+ psReservationInt,
+ psPMRInt,
+ psDevmemIntMapPagesIN->ui32PageCount,
+ psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+ psDevmemIntMapPagesIN->uiFlags,
+ psDevmemIntMapPagesIN->sDevVAddr);
+
+DevmemIntMapPages_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+ }
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntUnmapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnmapPages_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnmapPagesOUT->eError =
+ DevmemIntUnmapPages(
+ psReservationInt,
+ psDevmemIntUnmapPagesIN->sDevVAddr,
+ psDevmemIntUnmapPagesIN->ui32PageCount);
+
+DevmemIntUnmapPages_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIsVDevAddrValidOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIsVDevAddrValid_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIsVDevAddrValidOUT->eError =
+ DevmemIntIsVDevAddrValid(psConnection, OSGetDevData(psConnection),
+ psDevmemCtxInt,
+ psDevmemIsVDevAddrValidIN->sAddress);
+
+DevmemIsVDevAddrValid_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+ psHeapCfgHeapConfigCountOUT->eError =
+ HeapCfgHeapConfigCount(psConnection, OSGetDevData(psConnection),
+ &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psHeapCfgHeapCountOUT->eError =
+ HeapCfgHeapCount(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+ &psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
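+ /* Echo the caller's buffer pointer into the OUT structure so the
+  * OSCopyToUser() below writes the heap config name straight back to it.
+  */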
+ psHeapCfgHeapConfigNameOUT->puiHeapConfigName = psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapConfigNameIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+ {
+ puiHeapConfigNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR);
+ }
+
+
+
+ psHeapCfgHeapConfigNameOUT->eError =
+ HeapCfgHeapConfigName(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+ puiHeapConfigNameInt);
+
+
+
+ if ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psHeapCfgHeapConfigNameOUT->puiHeapConfigName, puiHeapConfigNameInt,
+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+
+
+HeapCfgHeapConfigName_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *puiHeapNameOutInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+ psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapDetailsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+ {
+ puiHeapNameOutInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR);
+ }
+
+
+
+ psHeapCfgHeapDetailsOUT->eError =
+ HeapCfgHeapDetails(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+ puiHeapNameOutInt,
+ &psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+ &psHeapCfgHeapDetailsOUT->uiHeapLength,
+ &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+ &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut);
+
+
+
+ if ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psHeapCfgHeapDetailsOUT->puiHeapNameOut, puiHeapNameOutInt,
+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+
+
+HeapCfgHeapDetails_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntRegisterPFNotifyKMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntRegisterPFNotifyKM_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntRegisterPFNotifyKMOUT->eError =
+ DevmemIntRegisterPFNotifyKM(
+ psDevmemCtxInt,
+ psDevmemIntRegisterPFNotifyKMIN->ui32PID,
+ psDevmemIntRegisterPFNotifyKMIN->bRegister);
+
+DevmemIntRegisterPFNotifyKM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
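+/*
+ * Every MM entry below is registered with bUseLock = IMG_TRUE, which asks
+ * the dispatcher to take its bridge lock around the call; the NULL argument
+ * is the optional per-bridge lock, unused by this group.
+ */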
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, PVRSRVBridgePMRExportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, PVRSRVBridgePMRUnexportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRMakeLocalImportHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRUnmakeLocalImportHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, PVRSRVBridgePMRImportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, PVRSRVBridgePMRLocalImportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, PVRSRVBridgePMRUnrefPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, PVRSRVBridgePMRUnrefUnlockPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, PVRSRVBridgePhysmemNewRamBackedPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, PVRSRVBridgePhysmemNewRamBackedLockedPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, PVRSRVBridgeDevmemIntPin,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, PVRSRVBridgeDevmemIntUnpin,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, PVRSRVBridgeDevmemIntPinValidate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, PVRSRVBridgeDevmemIntUnpinInvalidate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, PVRSRVBridgeDevmemIntCtxCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, PVRSRVBridgeDevmemIntCtxDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, PVRSRVBridgeDevmemIntHeapCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, PVRSRVBridgeDevmemIntHeapDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, PVRSRVBridgeDevmemIntMapPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, PVRSRVBridgeDevmemIntUnmapPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, PVRSRVBridgeDevmemIntReserveRange,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, PVRSRVBridgeDevmemIntUnreserveRange,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, PVRSRVBridgeChangeSparseMem,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, PVRSRVBridgeDevmemIntMapPages,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, PVRSRVBridgeDevmemIntUnmapPages,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, PVRSRVBridgeDevmemIsVDevAddrValid,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, PVRSRVBridgeHeapCfgHeapConfigCount,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, PVRSRVBridgeHeapCfgHeapCount,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, PVRSRVBridgeHeapCfgHeapConfigName,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, PVRSRVBridgeHeapCfgHeapDetails,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, PVRSRVBridgeDevmemIntRegisterPFNotifyKM,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all MM functions with services
+ */
+PVRSRV_ERROR DeinitMMBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMP_BRIDGE_H
+#define CLIENT_PDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+ IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_CHAR *puiComment,
+ IMG_UINT32 ui32Flags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Frame);
+
+
+#endif /* CLIENT_PDUMP_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
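+/*
+ * "Direct" client bridge: these wrappers call the kernel-mode
+ * implementations directly rather than marshalling an ioctl, for builds in
+ * which the caller shares the server's address space. Where a device is
+ * needed, the bridge handle is reinterpreted as the device node.
+ */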
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+ IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntPDumpBitmap(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ puiFileName,
+ ui32FileOffset,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ sDevBaseAddr,
+ psDevmemCtxInt,
+ ui32Size,
+ ePixelFormat,
+ ui32AddrMode,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_CHAR *puiComment,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpCommentKM(
+ puiComment,
+ ui32Flags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ ui32Frame);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
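+/* The command IDs above are the indices used by the SetDispatchTableEntry()
+ * registrations in the server-side pdump bridge. The IN/OUT structures are
+ * packed so the user-mode client and the kernel agree on their layout.
+ */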
+
+
+/*******************************************
+ DevmemPDumpBitmap
+ *******************************************/
+
+/* Bridge in structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG
+{
+ IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32StrideInBytes;
+ IMG_DEV_VIRTADDR sDevBaseAddr;
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32Size;
+ PDUMP_PIXEL_FORMAT ePixelFormat;
+ IMG_UINT32 ui32AddrMode;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP;
+
+/* Bridge out structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP;
+
+
+/*******************************************
+ PVRSRVPDumpComment
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+ IMG_CHAR * puiComment;
+ IMG_UINT32 ui32Flags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+
+/*******************************************
+ PVRSRVPDumpSetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+ IMG_UINT32 ui32Frame;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiFileNameInt = NULL;
+ IMG_HANDLE hDevmemCtx = psDevmemPDumpBitmapIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
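+ /* The file name is always copied at its fixed maximum size
+  * (PVRSRV_PDUMP_MAX_FILENAME_SIZE) rather than at a caller-supplied
+  * length.
+  */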
+ IMG_UINT32 ui32BufferSize =
+ (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemPDumpBitmapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemPDumpBitmapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevmemPDumpBitmap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, psDevmemPDumpBitmapIN->puiFileName, PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevmemPDumpBitmap_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psDevmemPDumpBitmapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemPDumpBitmap_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemPDumpBitmapOUT->eError =
+ DevmemIntPDumpBitmap(psConnection, OSGetDevData(psConnection),
+ uiFileNameInt,
+ psDevmemPDumpBitmapIN->ui32FileOffset,
+ psDevmemPDumpBitmapIN->ui32Width,
+ psDevmemPDumpBitmapIN->ui32Height,
+ psDevmemPDumpBitmapIN->ui32StrideInBytes,
+ psDevmemPDumpBitmapIN->sDevBaseAddr,
+ psDevmemCtxInt,
+ psDevmemPDumpBitmapIN->ui32Size,
+ psDevmemPDumpBitmapIN->ePixelFormat,
+ psDevmemPDumpBitmapIN->ui32AddrMode,
+ psDevmemPDumpBitmapIN->ui32PDumpFlags);
+
+DevmemPDumpBitmap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiCommentInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+
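+ /* PDumpCommentKM() takes no device or connection, so the connection
+  * parameter is deliberately unused here.
+  */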
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPVRSRVPDumpCommentIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PVRSRVPDumpComment_exit;
+ }
+ }
+ }
+
+
+ {
+ uiCommentInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiCommentInt, psPVRSRVPDumpCommentIN->puiComment, PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PVRSRVPDumpComment_exit;
+ }
+ }
+
+
+ psPVRSRVPDumpCommentOUT->eError =
+ PDumpCommentKM(
+ uiCommentInt,
+ psPVRSRVPDumpCommentIN->ui32Flags);
+
+PVRSRVPDumpComment_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPVRSRVPDumpSetFrameOUT->eError =
+ PDumpSetFrameKM(psConnection, OSGetDevData(psConnection),
+ psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+
+/*
+ * Register all PDUMP functions with services
+ */
+PVRSRV_ERROR InitPDUMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP, PVRSRVBridgeDevmemPDumpBitmap,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, PVRSRVBridgePVRSRVPDumpComment,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, PVRSRVBridgePVRSRVPDumpSetFrame,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMP functions with services
+ */
+PVRSRV_ERROR DeinitPDUMPBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMPCTRL_BRIDGE_H
+#define CLIENT_PDUMPCTRL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpctrl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsCapturing(IMG_HANDLE hBridge,
+ IMG_BOOL *pbIsCapturing);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL *pbpbIsLastCaptureFrame);
+
+
+#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdumpctrl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "pdump_km.h"
+
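+/*
+ * Direct client bridge for the PDump control calls: only PDumpGetFrameKM()
+ * needs the device node recovered from the bridge handle; the other calls
+ * operate on global PDump state.
+ */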
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsCapturing(IMG_HANDLE hBridge,
+ IMG_BOOL *pbIsCapturing)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpIsCaptureFrameKM(
+ pbIsCapturing);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ pui32Frame);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpSetDefaultCaptureParamsKM(
+ ui32Mode,
+ ui32Start,
+ ui32End,
+ ui32Interval,
+ ui32MaxParamFileSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL *pbpbIsLastCaptureFrame)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpIsLastCaptureFrameKM(
+ pbpbIsLastCaptureFrame);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3)
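+/* Calls in this group that take no inputs still carry a
+ * ui32EmptyStructPlaceholder member so the IN structure is never empty and
+ * its size stays consistent between the client and the kernel.
+ */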
+
+
+/*******************************************
+ PVRSRVPDumpIsCapturing
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING;
+
+/* Bridge out structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING_TAG
+{
+ IMG_BOOL bIsCapturing;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING;
+
+
+/*******************************************
+ PVRSRVPDumpGetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+ IMG_UINT32 ui32Frame;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+
+/*******************************************
+ PVRSRVPDumpSetDefaultCaptureParams
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ IMG_UINT32 ui32Mode;
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32End;
+ IMG_UINT32 ui32Interval;
+ IMG_UINT32 ui32MaxParamFileSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+
+/*******************************************
+ PVRSRVPDumpIsLastCaptureFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ IMG_BOOL bpbIsLastCaptureFrame;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsCapturing(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsCapturingIN);
+
+
+
+
+
+ psPVRSRVPDumpIsCapturingOUT->eError =
+ PDumpIsCaptureFrameKM(
+ &psPVRSRVPDumpIsCapturingOUT->bIsCapturing);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+
+
+
+
+ psPVRSRVPDumpGetFrameOUT->eError =
+ PDumpGetFrameKM(psConnection, OSGetDevData(psConnection),
+ &psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+ psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+ PDumpSetDefaultCaptureParamsKM(
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+
+
+
+
+ psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+ PDumpIsLastCaptureFrameKM(
+ &psPVRSRVPDumpIsLastCaptureFrameOUT->bpbIsLastCaptureFrame);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pPDUMPCTRLBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+
+/*
+ * Register all PDUMPCTRL functions with services
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING, PVRSRVBridgePVRSRVPDumpIsCapturing,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, PVRSRVBridgePVRSRVPDumpGetFrame,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPCTRL functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), "OSLockDestroy");
+ return PVRSRV_OK;
+}
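+
+/*
+ * Usage sketch (illustration only): the bridge framework is expected to call
+ * InitPDUMPCTRLBridge() once at start-of-day and DeinitPDUMPCTRLBridge() on
+ * teardown; the surrounding caller shown here is an assumption.
+ *
+ *   PVRSRV_ERROR eError = InitPDUMPCTRLBridge();
+ *
+ *   if (eError != PVRSRV_OK)
+ *           return eError;
+ *   ...
+ *   (void) DeinitPDUMPCTRLBridge();
+ */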
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpmm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32uiFileOffset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
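+/*
+ * Usage sketch (illustration only): a direct bridge client calls these
+ * wrappers with a services bridge handle and a PMR handle it already owns;
+ * hBridge and hPMR below are assumptions for illustration.
+ *
+ *   PVRSRV_ERROR eError;
+ *
+ *   eError = BridgePMRPDumpLoadMemValue32(hBridge,
+ *                                         hPMR,
+ *                                         0,            // uiOffset
+ *                                         0xDEADBEEF,   // ui32Value
+ *                                         0);           // ui32PDumpFlags
+ *   if (eError != PVRSRV_OK)
+ *           return eError;
+ */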
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_BOOL bbZero)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMem(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ ui32PDumpFlags,
+ bbZero);
+
+ return eError;
+}
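+
+/*
+ * Every direct client wrapper in this file follows the shape of the function
+ * above: the opaque IMG_HANDLE from the caller is cast straight to the
+ * server-internal type and forwarded, and hBridge is unused on this path.
+ * A minimal sketch of the pattern (names are placeholders):
+ *
+ *   PVRSRV_ERROR BridgeSomePMRCall(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+ *   {
+ *           PMR *psPMRInt = (PMR *) hPMR;
+ *
+ *           PVR_UNREFERENCED_PARAMETER(hBridge);
+ *           return SomePMRCallKM(psPMRInt);
+ *   }
+ */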
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMemValue32(
+ psPMRInt,
+ uiOffset,
+ ui32Value,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMemValue64(
+ psPMRInt,
+ uiOffset,
+ ui64Value,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpSaveToFile(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ ui32ArraySize,
+ puiFileName,
+ ui32uiFileOffset);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMR_PDumpSymbolicAddr(
+ psPMRInt,
+ uiOffset,
+ ui32MemspaceNameLen,
+ puiMemspaceName,
+ ui32SymbolicAddrLen,
+ puiSymbolicAddr,
+ puiNewOffset,
+ puiNextSymName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpPol32(
+ psPMRInt,
+ uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpCBP(
+ psPMRInt,
+ uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemServerContextInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError =
+ DevmemIntPDumpSaveToFileVirtual(
+ psDevmemServerContextInt,
+ sAddress,
+ uiSize,
+ ui32ArraySize,
+ puiFileName,
+ ui32FileOffset,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7)
+
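+/*
+ * The command identifiers above are numbered densely from CMD_FIRST, so the
+ * number of dispatch entries this module contributes can be derived directly
+ * (the CMD_COUNT macro below is an illustration, not part of the interface):
+ *
+ *   #define PVRSRV_BRIDGE_PDUMPMM_CMD_COUNT \
+ *           (PVRSRV_BRIDGE_PDUMPMM_CMD_LAST - PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST + 1)
+ *
+ *   PVRSRV_BRIDGE_PDUMPMM_CMD_COUNT evaluates to 8 for the entries above.
+ */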
+
+/*******************************************
+ PMRPDumpLoadMem
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_BOOL bbZero;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+
+/*******************************************
+ PMRPDumpLoadMemValue32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+
+/*******************************************
+ PMRPDumpLoadMemValue64
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT64 ui64Value;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+
+/*******************************************
+ PMRPDumpSaveToFile
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32ArraySize;
+ const IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32uiFileOffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+
+/*******************************************
+ PMRPDumpSymbolicAddr
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32MemspaceNameLen;
+ IMG_UINT32 ui32SymbolicAddrLen;
+ /* Output pointer puiMemspaceName is also an implied input */
+ IMG_CHAR * puiMemspaceName;
+ /* Output pointer puiSymbolicAddr is also an implied input */
+ IMG_CHAR * puiSymbolicAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_CHAR * puiMemspaceName;
+ IMG_CHAR * puiSymbolicAddr;
+ IMG_DEVMEM_OFFSET_T uiNewOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
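+
+/*
+ * Usage sketch (illustration only): the caller supplies both name buffers and
+ * their lengths in the IN structure; the server writes the resulting strings
+ * back into those caller buffers and echoes the pointers in the OUT
+ * structure. Buffer sizes and hPMR below are placeholders.
+ *
+ *   IMG_CHAR acMemspace[64];
+ *   IMG_CHAR acSymbolic[128];
+ *   PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR sIn;
+ *
+ *   memset(&sIn, 0, sizeof(sIn));
+ *   sIn.hPMR = hPMR;
+ *   sIn.uiOffset = 0;
+ *   sIn.ui32MemspaceNameLen = sizeof(acMemspace);
+ *   sIn.puiMemspaceName = acMemspace;
+ *   sIn.ui32SymbolicAddrLen = sizeof(acSymbolic);
+ *   sIn.puiSymbolicAddr = acSymbolic;
+ */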
+
+
+/*******************************************
+ PMRPDumpPol32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+
+/*******************************************
+ PMRPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+
+/*******************************************
+ DevmemIntPDumpSaveToFileVirtual
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ IMG_HANDLE hDevmemServerContext;
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32ArraySize;
+ const IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
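+/*
+ * Usage sketch (illustration only): puiFileName points at a caller-owned
+ * character array and ui32ArraySize is the number of IMG_CHAR elements the
+ * server copies across the bridge, which presumably should include the
+ * terminating NUL. The file name below is a placeholder.
+ *
+ *   static const IMG_CHAR acFileName[] = "capture.prm";
+ *   PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL sIn;
+ *
+ *   memset(&sIn, 0, sizeof(sIn));
+ *   sIn.puiFileName = acFileName;
+ *   sIn.ui32ArraySize = sizeof(acFileName);   // includes the trailing '\0'
+ */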
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMem_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemOUT->eError =
+ PMRPDumpLoadMem(
+ psPMRInt,
+ psPMRPDumpLoadMemIN->uiOffset,
+ psPMRPDumpLoadMemIN->uiSize,
+ psPMRPDumpLoadMemIN->ui32PDumpFlags,
+ psPMRPDumpLoadMemIN->bbZero);
+
+
+
+
+PMRPDumpLoadMem_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMemValue32_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PMRPDumpLoadMemValue32(
+ psPMRInt,
+ psPMRPDumpLoadMemValue32IN->uiOffset,
+ psPMRPDumpLoadMemValue32IN->ui32Value,
+ psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMemValue64_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PMRPDumpLoadMemValue64(
+ psPMRInt,
+ psPMRPDumpLoadMemValue64IN->uiOffset,
+ psPMRPDumpLoadMemValue64IN->ui64Value,
+ psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue64_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSaveToFileIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+ }
+
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, psPMRPDumpSaveToFileIN->puiFileName, psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpSaveToFileOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpSaveToFileOUT->eError =
+ PMRPDumpSaveToFile(
+ psPMRInt,
+ psPMRPDumpSaveToFileIN->uiOffset,
+ psPMRPDumpSaveToFileIN->uiSize,
+ psPMRPDumpSaveToFileIN->ui32ArraySize,
+ uiFileNameInt,
+ psPMRPDumpSaveToFileIN->ui32uiFileOffset);
+
+
+
+
+PMRPDumpSaveToFile_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
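+
+/*
+ * Note on the array-argument staging in the handler above (the same pattern
+ * is used by every entry point in this file that carries an array): the
+ * copied-in array is first placed in the unused tail of the fixed-size bridge
+ * input buffer and OSAllocMemNoStats() is only used as a fallback when it
+ * does not fit. A minimal sketch of that size check, with psIN and
+ * ui32ArrayBytes as placeholders:
+ *
+ *   IMG_UINT32 ui32Offset = PVR_ALIGN(sizeof(*psIN), sizeof(unsigned long));
+ *   IMG_UINT32 ui32Spare = (ui32Offset >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
+ *                          0 : (PVRSRV_MAX_BRIDGE_IN_SIZE - ui32Offset);
+ *   IMG_BOOL bFits = (IMG_BOOL)(ui32ArrayBytes <= ui32Spare);
+ */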
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *puiMemspaceNameInt = NULL;
+ IMG_CHAR *puiSymbolicAddrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) +
+ (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+ psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+ psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSymbolicAddrIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+ {
+ puiMemspaceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR);
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+ {
+ puiSymbolicAddrInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR);
+ }
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PMR_PDumpSymbolicAddr(
+ psPMRInt,
+ psPMRPDumpSymbolicAddrIN->uiOffset,
+ psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+ puiMemspaceNameInt,
+ psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+ puiSymbolicAddrInt,
+ &psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+ &psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+
+
+
+ if ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiMemspaceName, puiMemspaceNameInt,
+ (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+ if ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, puiSymbolicAddrInt,
+ (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+
+PMRPDumpSymbolicAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpPol32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpPol32OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpPol32_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpPol32OUT->eError =
+ PMRPDumpPol32(
+ psPMRInt,
+ psPMRPDumpPol32IN->uiOffset,
+ psPMRPDumpPol32IN->ui32Value,
+ psPMRPDumpPol32IN->ui32Mask,
+ psPMRPDumpPol32IN->eOperator,
+ psPMRPDumpPol32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpPol32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psPMRPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpCBPOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpCBP_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psPMRPDumpCBPOUT->eError =
+ PMRPDumpCBP(
+ psPMRInt,
+ psPMRPDumpCBPIN->uiReadOffset,
+ psPMRPDumpCBPIN->uiWriteOffset,
+ psPMRPDumpCBPIN->uiPacketSize,
+ psPMRPDumpCBPIN->uiBufferSize);
+
+
+
+
+PMRPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerContext = psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext;
+ DEVMEMINT_CTX * psDevmemServerContextInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemIntPDumpSaveToFileVirtualIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+ }
+
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, psDevmemIntPDumpSaveToFileVirtualIN->puiFileName, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerContextInt,
+ hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ DevmemIntPDumpSaveToFileVirtual(
+ psDevmemServerContextInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+ psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+ uiFileNameInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+
+
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psDevmemServerContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been cleaned up. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, PVRSRVBridgePMRPDumpLoadMem,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, PVRSRVBridgePMRPDumpLoadMemValue32,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, PVRSRVBridgePMRPDumpLoadMemValue64,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, PVRSRVBridgePMRPDumpSaveToFile,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, PVRSRVBridgePMRPDumpSymbolicAddr,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, PVRSRVBridgePMRPDumpPol32,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, PVRSRVBridgePMRPDumpCBP,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPMM functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPMMBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE *phSD,
+ IMG_HANDLE *phTLPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32ReadOffset,
+ IMG_UINT32 *pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReadOffset,
+ IMG_UINT32 ui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiNamePattern,
+ IMG_UINT32 ui32Max,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReqSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *psData);
+
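+/*
+ * Usage sketch (illustration only): a typical reader opens a stream, loops
+ * acquiring and releasing blocks of data, and finally closes the descriptor.
+ * hBridge, the stream name and the open mode below are assumptions.
+ *
+ *   IMG_HANDLE hSD, hTLPMR;
+ *   IMG_UINT32 ui32Offset, ui32Len;
+ *
+ *   if (BridgeTLOpenStream(hBridge, "hwperf", 0, &hSD, &hTLPMR) != PVRSRV_OK)
+ *           return;
+ *
+ *   while (BridgeTLAcquireData(hBridge, hSD, &ui32Offset, &ui32Len) == PVRSRV_OK &&
+ *          ui32Len != 0)
+ *   {
+ *           ... consume ui32Len bytes at ui32Offset in the stream buffer ...
+ *           BridgeTLReleaseData(hBridge, hSD, ui32Offset, ui32Len);
+ *   }
+ *
+ *   BridgeTLCloseStream(hBridge, hSD);
+ */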
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE *phSD,
+ IMG_HANDLE *phTLPMR)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PMR * psTLPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ TLServerOpenStreamKM(
+ puiName,
+ ui32Mode,
+ &psSDInt,
+ &psTLPMRInt);
+
+ *phSD = psSDInt;
+ *phTLPMR = psTLPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerCloseStreamKM(
+ psSDInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32ReadOffset,
+ IMG_UINT32 *pui32ReadLen)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerAcquireDataKM(
+ psSDInt,
+ pui32ReadOffset,
+ pui32ReadLen);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReadOffset,
+ IMG_UINT32 ui32ReadLen)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerReleaseDataKM(
+ psSDInt,
+ ui32ReadOffset,
+ ui32ReadLen);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiNamePattern,
+ IMG_UINT32 ui32Max,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ TLServerDiscoverStreamsKM(
+ puiNamePattern,
+ ui32Max,
+ pui32Streams,
+ pui32NumFound);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerReserveStreamKM(
+ psSDInt,
+ pui32BufferOffset,
+ ui32Size,
+ ui32SizeMin,
+ pui32Available);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReqSize)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerCommitStreamKM(
+ psSDInt,
+ ui32ReqSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *psData)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerWriteDataKM(
+ psSDInt,
+ ui32Size,
+ psData);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
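+
+/*
+ * Each command is identified by an offset from PVRSRV_BRIDGE_PVRTL_CMD_FIRST;
+ * the dispatcher presumably combines this offset with the PVRSRV_BRIDGE_PVRTL
+ * group index to find the entry registered via SetDispatchTableEntry() in
+ * InitPVRTLBridge(). CMD_LAST must be kept equal to the highest offset above.
+ */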
+
+
+/*******************************************
+ TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+ const IMG_CHAR * puiName;
+ IMG_UINT32 ui32Mode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_HANDLE hTLPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
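+
+/*
+ * Convention used by all bridge calls in this header: the IN structure holds
+ * the values and user-space pointers supplied by the caller, and the OUT
+ * structure returns any handles plus the PVRSRV_ERROR in eError. Both are
+ * packed, presumably so the layout agreed between user space and the kernel
+ * stubs does not depend on compiler padding.
+ */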
+
+
+/*******************************************
+ TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+
+/*******************************************
+ TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+ IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+ IMG_UINT32 ui32ReadOffset;
+ IMG_UINT32 ui32ReadLen;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+
+/*******************************************
+ TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReadOffset;
+ IMG_UINT32 ui32ReadLen;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+
+/*******************************************
+ TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+ const IMG_CHAR * puiNamePattern;
+ IMG_UINT32 ui32Max;
+ /* Output pointer pui32Streams is also an implied input */
+ IMG_UINT32 * pui32Streams;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+ IMG_UINT32 * pui32Streams;
+ IMG_UINT32 ui32NumFound;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
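+
+/*
+ * Note: pui32Streams is a caller-supplied array of at least ui32Max entries.
+ * It appears in both the IN and OUT structures because the server stub reads
+ * the destination pointer from the input and copies the ui32Max-entry result
+ * array back to it, reporting the number actually found in ui32NumFound.
+ */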
+
+
+/*******************************************
+ TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32SizeMin;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+ IMG_UINT32 ui32BufferOffset;
+ IMG_UINT32 ui32Available;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+
+/*******************************************
+ TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReqSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+
+/*******************************************
+ TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32Size;
+ IMG_BYTE * psData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
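+
+/*
+ * Every entry point below follows the same generated pattern:
+ *  1. work out how much space the array arguments need and stage them in
+ *     either the spare tail of the bridge input buffer or a temporary
+ *     allocation,
+ *  2. copy the array arguments in from user space,
+ *  3. translate any handles in the IN structure to kernel pointers,
+ *  4. call the TLServer*KM implementation,
+ *  5. wrap returned objects in new handles and/or copy results back out,
+ *  6. release handle references and free the staging buffer on exit.
+ */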
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN,
+ PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNameInt = NULL;
+ TL_STREAM_DESC * psSDInt = NULL;
+ PMR * psTLPMRInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+ psTLOpenStreamOUT->hSD = NULL;
+
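+	/*
+	 * ui32BufferSize is the total space needed for this call's array
+	 * arguments (here only the fixed-size stream name). When it fits in
+	 * the unused tail of the fixed-size bridge input buffer
+	 * (PVRSRV_MAX_BRIDGE_IN_SIZE) that space is reused; otherwise a
+	 * temporary buffer is allocated and freed again at TLOpenStream_exit.
+	 */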
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLOpenStreamIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLOpenStream_exit;
+ }
+ }
+ }
+
+
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, psTLOpenStreamIN->puiName, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLOpenStream_exit;
+ }
+ }
+
+
+ psTLOpenStreamOUT->eError =
+ TLServerOpenStreamKM(
+ uiNameInt,
+ psTLOpenStreamIN->ui32Mode,
+ &psSDInt,
+ &psTLPMRInt);
+ /* Exit early if bridged call fails */
+ if(psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ goto TLOpenStream_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
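+	/*
+	 * The stream descriptor is returned to the caller as a handle rather
+	 * than a raw pointer. Registering TLServerCloseStreamKM as the release
+	 * function means the stream is closed automatically when the handle is
+	 * freed, for example if the connection is torn down without an explicit
+	 * TLCloseStream. The PMR below is allocated as a sub-handle of hSD so
+	 * the two are released together.
+	 */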
+ psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psTLOpenStreamOUT->hSD,
+ (void *) psSDInt,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&TLServerCloseStreamKM);
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLOpenStream_exit;
+ }
+
+ psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psTLOpenStreamOUT->hTLPMR,
+ (void *) psTLPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,psTLOpenStreamOUT->hSD);
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLOpenStream_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+TLOpenStream_exit:
+
+
+
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psTLOpenStreamOUT->hSD)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLOpenStreamOUT->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeTLOpenStream: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSDInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psSDInt)
+ {
+ TLServerCloseStreamKM(psSDInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN,
+ PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
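+	/*
+	 * Closing a stream is just a handle release: freeing the PVR_TL_SD
+	 * handle invokes TLServerCloseStreamKM, which was registered as the
+	 * release function when the handle was created in TLOpenStream.
+	 */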
+ psTLCloseStreamOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLCloseStreamIN->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if ((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeTLCloseStream: %s",
+ PVRSRVGetErrorStringKM(psTLCloseStreamOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto TLCloseStream_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+TLCloseStream_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN,
+ PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psTLAcquireDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLAcquireDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLAcquireData_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLAcquireDataOUT->eError =
+ TLServerAcquireDataKM(
+ psSDInt,
+ &psTLAcquireDataOUT->ui32ReadOffset,
+ &psTLAcquireDataOUT->ui32ReadLen);
+
+
+
+
+TLAcquireData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN,
+ PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psTLReleaseDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLReleaseDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLReleaseData_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLReleaseDataOUT->eError =
+ TLServerReleaseDataKM(
+ psSDInt,
+ psTLReleaseDataIN->ui32ReadOffset,
+ psTLReleaseDataIN->ui32ReadLen);
+
+
+
+
+TLReleaseData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN,
+ PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNamePatternInt = NULL;
+ IMG_UINT32 *pui32StreamsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+ (psTLDiscoverStreamsIN->ui32Max * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
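+	/*
+	 * Echo the caller's destination pointer into the OUT structure so the
+	 * discovered stream array can be copied back to user space once
+	 * TLServerDiscoverStreamsKM has filled the kernel-side copy.
+	 */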
+ psTLDiscoverStreamsOUT->pui32Streams = psTLDiscoverStreamsIN->pui32Streams;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLDiscoverStreamsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLDiscoverStreams_exit;
+ }
+ }
+ }
+
+
+ {
+ uiNamePatternInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNamePatternInt, psTLDiscoverStreamsIN->puiNamePattern, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLDiscoverStreams_exit;
+ }
+ }
+ if (psTLDiscoverStreamsIN->ui32Max != 0)
+ {
+ pui32StreamsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psTLDiscoverStreamsIN->ui32Max * sizeof(IMG_UINT32);
+ }
+
+
+
+ psTLDiscoverStreamsOUT->eError =
+ TLServerDiscoverStreamsKM(
+ uiNamePatternInt,
+ psTLDiscoverStreamsIN->ui32Max,
+ pui32StreamsInt,
+ &psTLDiscoverStreamsOUT->ui32NumFound);
+
+
+
+ if ((psTLDiscoverStreamsIN->ui32Max * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psTLDiscoverStreamsOUT->pui32Streams, pui32StreamsInt,
+ (psTLDiscoverStreamsIN->ui32Max * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLDiscoverStreams_exit;
+ }
+ }
+
+
+TLDiscoverStreams_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN,
+ PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psTLReserveStreamOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLReserveStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLReserveStream_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLReserveStreamOUT->eError =
+ TLServerReserveStreamKM(
+ psSDInt,
+ &psTLReserveStreamOUT->ui32BufferOffset,
+ psTLReserveStreamIN->ui32Size,
+ psTLReserveStreamIN->ui32SizeMin,
+ &psTLReserveStreamOUT->ui32Available);
+
+
+
+
+TLReserveStream_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN,
+ PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psTLCommitStreamOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLCommitStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLCommitStream_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLCommitStreamOUT->eError =
+ TLServerCommitStreamKM(
+ psSDInt,
+ psTLCommitStreamIN->ui32ReqSize);
+
+
+
+
+TLCommitStream_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN,
+ PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+ IMG_BYTE *psDataInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLWriteDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLWriteData_exit;
+ }
+ }
+ }
+
+ if (psTLWriteDataIN->ui32Size != 0)
+ {
+ psDataInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDataInt, psTLWriteDataIN->psData, psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLWriteData_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psTLWriteDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLWriteDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLWriteData_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLWriteDataOUT->eError =
+ TLServerWriteDataKM(
+ psSDInt,
+ psTLWriteDataIN->ui32Size,
+ psDataInt);
+
+
+
+
+TLWriteData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_FALSE;
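+
+/*
+ * Note that the PVRTL entry points are registered with bUseLock = IMG_FALSE,
+ * i.e. they are dispatched without taking the global bridge lock; the
+ * transport-layer server is presumably expected to do its own locking so
+ * that stream reads do not serialise against unrelated bridge calls.
+ */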
+
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, PVRSRVBridgeTLOpenStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, PVRSRVBridgeTLCloseStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, PVRSRVBridgeTLAcquireData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, PVRSRVBridgeTLReleaseData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, PVRSRVBridgeTLDiscoverStreams,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, PVRSRVBridgeTLReserveStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, PVRSRVBridgeTLCommitStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, PVRSRVBridgeTLWriteData,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PVRTL functions from services
+ */
+PVRSRV_ERROR DeinitPVRTLBridge(void)
+{
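+	/*
+	 * Nothing to undo here: the entries added in InitPVRTLBridge() stay in
+	 * the dispatch table, which is presumably torn down as a whole, so
+	 * deinitialisation is a no-op for this bridge.
+	 */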
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for regconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for regconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_REGCONFIG_BRIDGE_H
+#define COMMON_REGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST 0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_LAST (PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+ IMG_UINT8 ui8RegPowerIsland;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+
+/*******************************************
+ RGXAddRegconfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+ IMG_UINT32 ui32RegAddr;
+ IMG_UINT64 ui64RegValue;
+ IMG_UINT64 ui64RegMask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+
+/*******************************************
+ RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+
+/*******************************************
+ RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+
+/*******************************************
+ RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+
+#endif /* COMMON_REGCONFIG_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for regconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for regconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+
+#include "common_regconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN,
+ PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psRGXSetRegConfigTypeOUT->eError =
+ PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevData(psConnection),
+ psRGXSetRegConfigTypeIN->ui8RegPowerIsland);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN,
+ PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psRGXAddRegconfigOUT->eError =
+ PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevData(psConnection),
+ psRGXAddRegconfigIN->ui32RegAddr,
+ psRGXAddRegconfigIN->ui64RegValue,
+ psRGXAddRegconfigIN->ui64RegMask);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+
+
+
+
+ psRGXClearRegConfigOUT->eError =
+ PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+
+
+
+
+ psRGXEnableRegConfigOUT->eError =
+ PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+
+
+
+
+ psRGXDisableRegConfigOUT->eError =
+ PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+
+/*
+ * Register all REGCONFIG functions with services
+ */
+PVRSRV_ERROR InitREGCONFIGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE, PVRSRVBridgeRGXSetRegConfigType,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG, PVRSRVBridgeRGXAddRegconfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG, PVRSRVBridgeRGXClearRegConfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG, PVRSRVBridgeRGXEnableRegConfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG, PVRSRVBridgeRGXDisableRegConfig,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all regconfig functions with services
+ */
+PVRSRV_ERROR DeinitREGCONFIGBridge(void)
+{
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_REGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_REGCONFIG_BRIDGE - when it is
+ * defined, the dispatch table is not populated with this bridge's functions
+ */
+#define InitREGCONFIGBridge() \
+ PVRSRV_OK
+
+#define DeinitREGCONFIGBridge() \
+ PVRSRV_OK
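+
+/*
+ * With the bridge excluded, both entry points collapse to PVRSRV_OK so that
+ * the common bridge initialisation code can call InitREGCONFIGBridge() and
+ * DeinitREGCONFIGBridge() unconditionally.
+ */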
+
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6)
+
+
+/*******************************************
+ RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sMCUFenceAddr;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+ IMG_DEV_VIRTADDR sResumeSignalAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+ IMG_HANDLE hComputeContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+
+/*******************************************
+ RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+
+/*******************************************
+ RGXKickCDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
+{
+ IMG_HANDLE hComputeContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ IMG_INT32 i32CheckFenceFd;
+ IMG_INT32 i32UpdateTimelineFd;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM;
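+
+/*
+ * The fence/update arrays above are implicitly sized: the ClientFence*
+ * pointers each reference ui32ClientFenceCount entries, the ClientUpdate*
+ * pointers ui32ClientUpdateCount entries, and the server sync arrays
+ * ui32ServerSyncCount entries, with psDMCmd carrying ui32CmdSize bytes of
+ * DM command data. i32CheckFenceFd and i32UpdateTimelineFd are presumably
+ * native sync file descriptors (-1 when unused).
+ */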
+
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
+{
+ IMG_INT32 i32UpdateFenceFd;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM;
+
+
+/*******************************************
+ RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+
+/*******************************************
+ RGXSetComputeContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hComputeContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXGetLastComputeContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+ IMG_UINT32 ui32LastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+
+/*******************************************
+ RGXNotifyComputeWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
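+	/*
+	 * The BVNC feature check above rejects the call with
+	 * PVRSRV_ERROR_NOT_SUPPORTED on cores whose feature mask does not
+	 * include compute, before any buffers are allocated or handles are
+	 * looked up.
+	 */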
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateComputeContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateComputeContextIN->ui32Priority,
+ psRGXCreateComputeContextIN->sMCUFenceAddr,
+ psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ psRGXCreateComputeContextIN->sResumeSignalAddr,
+ &psComputeContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
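+ /*
+ * Return the new compute context to the caller as a handle; registering
+ * PVRSRVRGXDestroyComputeContextKM as the release function lets the handle
+ * framework destroy the context when the handle is freed.
+ */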
+ psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateComputeContextOUT->hComputeContext,
+ (void *) psComputeContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyComputeContextKM);
+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateComputeContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ if (psComputeContextInt)
+ {
+ PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyComputeContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
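+ /*
+ * PVRSRV_ERROR_RETRY from the release is not treated as a failure below,
+ * presumably because destruction can be deferred and retried while the
+ * context is still busy.
+ */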
+ psRGXDestroyComputeContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyComputeContextIN->hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ if ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyComputeContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyComputeContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyComputeContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyComputeContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
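+ /*
+ * The kick packs the per-fence, per-update and per-server-sync arrays, the
+ * fixed 32-character update-fence name and the DM command itself into one
+ * buffer, in the order listed here; the copy-in code below walks the same
+ * order with ui32NextOffset.
+ */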
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickCDMIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickCDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceOffsetInt, psRGXKickCDMIN->pui32ClientFenceOffset, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickCDMIN->pui32ClientFenceValue, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickCDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateOffsetInt, psRGXKickCDMIN->pui32ClientUpdateOffset, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickCDMIN->pui32ClientUpdateValue, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickCDMIN->pui32ServerSyncFlags, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickCDMIN->phServerSyncs, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickCDMIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickCDMIN->psDMCmd, psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+
+
+
+
+
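+ /*
+ * Translate each user-supplied sync-prim-block and server-sync handle into
+ * its kernel object while the handle lock is held; every successful lookup
+ * here is matched by a PVRSRVReleaseHandleUnlocked() in the cleanup path at
+ * RGXKickCDM_exit.
+ */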
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickCDMOUT->eError =
+ PVRSRVRGXKickCDMKM(
+ psComputeContextInt,
+ psRGXKickCDMIN->ui32ClientCacheOpSeqNum,
+ psRGXKickCDMIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickCDMIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickCDMIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickCDMIN->i32CheckFenceFd,
+ psRGXKickCDMIN->i32UpdateTimelineFd,
+ &psRGXKickCDMOUT->i32UpdateFenceFd,
+ uiUpdateFenceNameInt,
+ psRGXKickCDMIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickCDMIN->ui32PDumpFlags,
+ psRGXKickCDMIN->ui32ExtJobRef);
+
+
+
+
+RGXKickCDM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientFenceUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN,
+ PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXFlushComputeData_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXFlushComputeDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXFlushComputeData_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXFlushComputeDataOUT->eError =
+ PVRSRVRGXFlushComputeDataKM(
+ psComputeContextInt);
+
+
+
+
+RGXFlushComputeData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetComputeContextPriority_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetComputeContextPriority_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psComputeContextInt,
+ psRGXSetComputeContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetComputeContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonIN,
+ PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXGetLastComputeContextResetReasonIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXGetLastComputeContextResetReasonOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXGetLastComputeContextResetReason_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXGetLastComputeContextResetReasonOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetLastComputeContextResetReason_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXGetLastComputeContextResetReasonOUT->eError =
+ PVRSRVRGXGetLastComputeContextResetReasonKM(
+ psComputeContextInt,
+ &psRGXGetLastComputeContextResetReasonOUT->ui32LastResetReason,
+ &psRGXGetLastComputeContextResetReasonOUT->ui32LastResetJobRef);
+
+
+
+
+RGXGetLastComputeContextResetReason_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(
+ psComputeContextInt);
+
+
+
+
+RGXNotifyComputeWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
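+/*
+ * Registration glue: each handler above is added to the services dispatch
+ * table under the PVRSRV_BRIDGE_RGXCMP group, using the command IDs shared
+ * with the client through common_rgxcmp_bridge.h. bUseLock appears to select
+ * whether the bridge lock is held around the handler; it is enabled for
+ * every entry here.
+ */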
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, PVRSRVBridgeRGXCreateComputeContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, PVRSRVBridgeRGXDestroyComputeContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, PVRSRVBridgeRGXKickCDM,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, PVRSRVBridgeRGXFlushComputeData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, PVRSRVBridgeRGXSetComputeContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON, PVRSRVBridgeRGXGetLastComputeContextResetReason,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXCMP functions from services
+ */
+PVRSRV_ERROR DeinitRGXCMPBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf_km.h"
+
+
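+/*
+ * Command IDs are offsets from the group's CMD_FIRST value and must stay in
+ * step with the SetDispatchTableEntry() registrations made by the server-side
+ * rgxhwperf bridge; CMD_LAST marks the highest ID in the group.
+ */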
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3)
+
+
+/*******************************************
+ RGXCtrlHWPerf
+ *******************************************/
+
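+/*
+ * The bridge in/out structures are packed, presumably so that the layout seen
+ * by user space and the kernel cannot diverge through compiler padding.
+ * Pointer members (e.g. psBlockConfigs, pui16BlockIDs) refer to user-space
+ * buffers which the server copies in with OSCopyFromUser() before use.
+ */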
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+ IMG_UINT32 ui32StreamId;
+ IMG_BOOL bToggle;
+ IMG_UINT64 ui64Mask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+
+/*******************************************
+ RGXConfigEnableHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+ IMG_UINT32 ui32ArrayLen;
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+
+/*******************************************
+ RGXCtrlHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG
+{
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32ArrayLen;
+ IMG_UINT16 * pui16BlockIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS;
+
+
+/*******************************************
+ RGXConfigCustomCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+ IMG_UINT16 ui16CustomBlockID;
+ IMG_UINT16 ui16NumCustomCounters;
+ IMG_UINT32 * pui32CustomCounterIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN,
+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXCtrlHWPerfOUT->eError =
+ PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevData(psConnection),
+ psRGXCtrlHWPerfIN->ui32StreamId,
+ psRGXCtrlHWPerfIN->bToggle,
+ psRGXCtrlHWPerfIN->ui64Mask);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigEnableHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigEnableHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXConfigEnableHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0)
+ {
+ psBlockConfigsInt = (RGX_HWPERF_CONFIG_CNTBLK*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+ }
+
+ /* Copy the data over */
+ if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psBlockConfigsInt, psRGXConfigEnableHWPerfCountersIN->psBlockConfigs, psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK )
+ {
+ psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXConfigEnableHWPerfCounters_exit;
+ }
+ }
+
+
+ psRGXConfigEnableHWPerfCountersOUT->eError =
+ PVRSRVRGXConfigEnableHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen,
+ psBlockConfigsInt);
+
+
+
+
+RGXConfigEnableHWPerfCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCtrlHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCtrlHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCtrlHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0)
+ {
+ ui16BlockIDsInt = (IMG_UINT16*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16);
+ }
+
+ /* Copy the data over */
+ if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui16BlockIDsInt, psRGXCtrlHWPerfCountersIN->pui16BlockIDs, psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK )
+ {
+ psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCtrlHWPerfCounters_exit;
+ }
+ }
+
+
+ psRGXCtrlHWPerfCountersOUT->eError =
+ PVRSRVRGXCtrlHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXCtrlHWPerfCountersIN->bEnable,
+ psRGXCtrlHWPerfCountersIN->ui32ArrayLen,
+ ui16BlockIDsInt);
+
+
+
+
+RGXCtrlHWPerfCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32CustomCounterIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigCustomCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXConfigCustomCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+ {
+ ui32CustomCounterIDsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32CustomCounterIDsInt, psRGXConfigCustomCountersIN->pui32CustomCounterIDs, psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXConfigCustomCounters_exit;
+ }
+ }
+
+
+ psRGXConfigCustomCountersOUT->eError =
+ PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXConfigCustomCountersIN->ui16CustomBlockID,
+ psRGXConfigCustomCountersIN->ui16NumCustomCounters,
+ ui32CustomCounterIDsInt);
+
+
+
+
+RGXConfigCustomCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, PVRSRVBridgeRGXCtrlHWPerf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS, PVRSRVBridgeRGXConfigEnableHWPerfCounters,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS, PVRSRVBridgeRGXCtrlHWPerfCounters,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, PVRSRVBridgeRGXConfigCustomCounters,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXHWPERF functions from services
+ */
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for rgxinit
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for rgxinit
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RGXINIT_BRIDGE_H
+#define CLIENT_RGXINIT_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxinit_bridge.h"
+
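+/*
+ * These prototypes mirror the rgxinit server entry points. With
+ * PVR_INDIRECT_BRIDGE_CLIENTS defined the calls are presumably routed through
+ * the ioctl bridge headers included above; otherwise the direct client bridge
+ * implementation calls the corresponding *KM functions in the same address
+ * space.
+ */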
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitAllocFWImgMem(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiFWCodeLen,
+ IMG_DEVMEM_SIZE_T uiFWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCoremem,
+ IMG_HANDLE *phFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_HANDLE *phFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_HANDLE *phFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFirmware(IMG_HANDLE hBridge,
+ RGXFWIF_DEV_VIRTADDR *pspsRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ IMG_HANDLE *phHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFinaliseFWImage(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitDevPart2(IMG_HANDLE hBridge,
+ RGX_INIT_COMMAND *psDbgScript,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSize,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ IMG_UINT32 ui32RGXActivePMConf,
+ IMG_HANDLE hFWCodePMR,
+ IMG_HANDLE hFWDataPMR,
+ IMG_HANDLE hFWCorememPMR,
+ IMG_HANDLE hHWPerfPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGPUVIRTPopulateLMASubArenas(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumElements,
+ IMG_UINT32 *pui32Elements,
+ IMG_BOOL bEnableTrustedDeviceAceConfig);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitGuest(IMG_HANDLE hBridge,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32DeviceFlags,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFirmwareExtended(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ RGXFWIF_DEV_VIRTADDR *pspsRGXFwInit,
+ IMG_HANDLE *phHWPerfPMR2,
+ RGX_FW_INIT_IN_PARAMS *pspsInParams);
+
+
+#endif /* CLIENT_RGXINIT_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for rgxinit
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_rgxinit_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+#include "rgxscript.h"
+#include "devicemem_typedefs.h"
+#include "rgx_fwif.h"
+
+#include "rgxinit.h"
+#include "pmr.h"
+
+
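+/*
+ * Direct (in-process) client bridge: hBridge is treated as the
+ * PVRSRV_DEVICE_NODE pointer itself and each wrapper calls the corresponding
+ * *KM function directly, converting between opaque IMG_HANDLEs and the PMR
+ * pointers they wrap.
+ */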
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitAllocFWImgMem(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiFWCodeLen,
+ IMG_DEVMEM_SIZE_T uiFWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCoremem,
+ IMG_HANDLE *phFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_HANDLE *phFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_HANDLE *phFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase)
+{
+ PVRSRV_ERROR eError;
+ PMR * psFWCodePMRInt;
+ PMR * psFWDataPMRInt;
+ PMR * psFWCorememPMRInt;
+
+
+ eError =
+ PVRSRVRGXInitAllocFWImgMemKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ uiFWCodeLen,
+ uiFWDataLen,
+ uiFWCoremem,
+ &psFWCodePMRInt,
+ psFWCodeDevVAddrBase,
+ &psFWDataPMRInt,
+ psFWDataDevVAddrBase,
+ &psFWCorememPMRInt,
+ psFWCorememDevVAddrBase,
+ psFWCorememMetaVAddrBase);
+
+ *phFWCodePMR = psFWCodePMRInt;
+ *phFWDataPMR = psFWDataPMRInt;
+ *phFWCorememPMR = psFWCorememPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFirmware(IMG_HANDLE hBridge,
+ RGXFWIF_DEV_VIRTADDR *pspsRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ IMG_HANDLE *phHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf)
+{
+ PVRSRV_ERROR eError;
+ PMR * psHWPerfPMRInt;
+
+
+ eError =
+ PVRSRVRGXInitFirmwareKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ pspsRGXFwInit,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32HWPerfFWBufSizeKB,
+ ui64HWPerfFilter,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ ui32ConfigFlags,
+ ui32LogType,
+ ui32FilterFlags,
+ ui32JonesDisableMask,
+ ui32ui32HWRDebugDumpLimit,
+ psClientBVNC,
+ ui32HWPerfCountersDataSize,
+ &psHWPerfPMRInt,
+ eRGXRDPowerIslandConf,
+ eFirmwarePerf);
+
+ *phHWPerfPMR = psHWPerfPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFinaliseFWImage(IMG_HANDLE hBridge)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ PVRSRVRGXInitFinaliseFWImageKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ );
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitDevPart2(IMG_HANDLE hBridge,
+ RGX_INIT_COMMAND *psDbgScript,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSize,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ IMG_UINT32 ui32RGXActivePMConf,
+ IMG_HANDLE hFWCodePMR,
+ IMG_HANDLE hFWDataPMR,
+ IMG_HANDLE hFWCorememPMR,
+ IMG_HANDLE hHWPerfPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psFWCodePMRInt;
+ PMR * psFWDataPMRInt;
+ PMR * psFWCorememPMRInt;
+ PMR * psHWPerfPMRInt;
+
+ psFWCodePMRInt = (PMR *) hFWCodePMR;
+ psFWDataPMRInt = (PMR *) hFWDataPMR;
+ psFWCorememPMRInt = (PMR *) hFWCorememPMR;
+ psHWPerfPMRInt = (PMR *) hHWPerfPMR;
+
+ eError =
+ PVRSRVRGXInitDevPart2KM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ psDbgScript,
+ ui32DeviceFlags,
+ ui32HWPerfHostBufSize,
+ ui32HWPerfHostFilter,
+ ui32RGXActivePMConf,
+ psFWCodePMRInt,
+ psFWDataPMRInt,
+ psFWCorememPMRInt,
+ psHWPerfPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGPUVIRTPopulateLMASubArenas(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumElements,
+ IMG_UINT32 *pui32Elements,
+ IMG_BOOL bEnableTrustedDeviceAceConfig)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ PVRSRVGPUVIRTPopulateLMASubArenasKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ ui32NumElements,
+ pui32Elements,
+ bEnableTrustedDeviceAceConfig);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitGuest(IMG_HANDLE hBridge,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32DeviceFlags,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+ PVRSRVRGXInitGuestKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ ui32DeviceFlags,
+ psClientBVNC);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRGXInitFirmwareExtended(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ RGXFWIF_DEV_VIRTADDR *pspsRGXFwInit,
+ IMG_HANDLE *phHWPerfPMR2,
+ RGX_FW_INIT_IN_PARAMS *pspsInParams)
+{
+ PVRSRV_ERROR eError;
+ PMR * psHWPerfPMR2Int;
+
+
+ eError =
+		PVRSRVRGXInitFirmwareExtendedKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ pspsRGXFwInit,
+ &psHWPerfPMR2Int,
+ pspsInParams);
+
+ *phHWPerfPMR2 = psHWPerfPMR2Int;
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxinit
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxinit
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXINIT_BRIDGE_H
+#define COMMON_RGXINIT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgxscript.h"
+#include "devicemem_typedefs.h"
+#include "rgx_fwif.h"
+
+
+#define PVRSRV_BRIDGE_RGXINIT_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITALLOCFWIMGMEM PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITFIRMWARE PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITFINALISEFWIMAGE PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITDEVPART2 PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXINIT_GPUVIRTPOPULATELMASUBARENAS PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITGUEST PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITFIRMWAREEXTENDED PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXINIT_CMD_LAST (PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+6)
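+
+/* These offsets index the rgxinit entries in the bridge dispatch table and
+ * must match between the client stubs and the server-side registration.
+ */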
+
+
+/*******************************************
+ RGXInitAllocFWImgMem
+ *******************************************/
+
+/* Bridge in structure for RGXInitAllocFWImgMem */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITALLOCFWIMGMEM_TAG
+{
+ IMG_DEVMEM_SIZE_T uiFWCodeLen;
+ IMG_DEVMEM_SIZE_T uiFWDataLen;
+ IMG_DEVMEM_SIZE_T uiFWCoremem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITALLOCFWIMGMEM;
+
+/* Bridge out structure for RGXInitAllocFWImgMem */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITALLOCFWIMGMEM_TAG
+{
+ IMG_HANDLE hFWCodePMR;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ IMG_HANDLE hFWDataPMR;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ IMG_HANDLE hFWCorememPMR;
+ IMG_DEV_VIRTADDR sFWCorememDevVAddrBase;
+ RGXFWIF_DEV_VIRTADDR sFWCorememMetaVAddrBase;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITALLOCFWIMGMEM;
+
+
+/*******************************************
+ RGXInitFirmware
+ *******************************************/
+
+/* Bridge in structure for RGXInitFirmware */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITFIRMWARE_TAG
+{
+ IMG_BOOL bEnableSignatureChecks;
+ IMG_UINT32 ui32SignatureChecksBufSize;
+ IMG_UINT32 ui32HWPerfFWBufSizeKB;
+ IMG_UINT64 ui64HWPerfFilter;
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength;
+ IMG_UINT32 * pui32RGXFWAlignChecks;
+ IMG_UINT32 ui32ConfigFlags;
+ IMG_UINT32 ui32LogType;
+ IMG_UINT32 ui32FilterFlags;
+ IMG_UINT32 ui32JonesDisableMask;
+ IMG_UINT32 ui32ui32HWRDebugDumpLimit;
+ RGXFWIF_COMPCHECKS_BVNC sClientBVNC;
+ IMG_UINT32 ui32HWPerfCountersDataSize;
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+ FW_PERF_CONF eFirmwarePerf;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITFIRMWARE;
+
+/* Bridge out structure for RGXInitFirmware */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITFIRMWARE_TAG
+{
+ RGXFWIF_DEV_VIRTADDR spsRGXFwInit;
+ IMG_HANDLE hHWPerfPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITFIRMWARE;
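+/* Note: the hHWPerfPMR returned here is passed back in as the hHWPerfPMR
+ * input of RGXInitDevPart2.
+ */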
+
+
+/*******************************************
+ RGXInitFinaliseFWImage
+ *******************************************/
+
+/* Bridge in structure for RGXInitFinaliseFWImage */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITFINALISEFWIMAGE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITFINALISEFWIMAGE;
+
+/* Bridge out structure for RGXInitFinaliseFWImage */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITFINALISEFWIMAGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITFINALISEFWIMAGE;
+
+
+/*******************************************
+ RGXInitDevPart2
+ *******************************************/
+
+/* Bridge in structure for RGXInitDevPart2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITDEVPART2_TAG
+{
+ RGX_INIT_COMMAND * psDbgScript;
+ IMG_UINT32 ui32DeviceFlags;
+ IMG_UINT32 ui32HWPerfHostBufSize;
+ IMG_UINT32 ui32HWPerfHostFilter;
+ IMG_UINT32 ui32RGXActivePMConf;
+ IMG_HANDLE hFWCodePMR;
+ IMG_HANDLE hFWDataPMR;
+ IMG_HANDLE hFWCorememPMR;
+ IMG_HANDLE hHWPerfPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITDEVPART2;
+
+/* Bridge out structure for RGXInitDevPart2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITDEVPART2_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITDEVPART2;
+
+
+/*******************************************
+ GPUVIRTPopulateLMASubArenas
+ *******************************************/
+
+/* Bridge in structure for GPUVIRTPopulateLMASubArenas */
+typedef struct PVRSRV_BRIDGE_IN_GPUVIRTPOPULATELMASUBARENAS_TAG
+{
+ IMG_UINT32 ui32NumElements;
+ IMG_UINT32 * pui32Elements;
+ IMG_BOOL bEnableTrustedDeviceAceConfig;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GPUVIRTPOPULATELMASUBARENAS;
+
+/* Bridge out structure for GPUVIRTPopulateLMASubArenas */
+typedef struct PVRSRV_BRIDGE_OUT_GPUVIRTPOPULATELMASUBARENAS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GPUVIRTPOPULATELMASUBARENAS;
+
+
+/*******************************************
+ RGXInitGuest
+ *******************************************/
+
+/* Bridge in structure for RGXInitGuest */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITGUEST_TAG
+{
+ IMG_BOOL bEnableSignatureChecks;
+ IMG_UINT32 ui32SignatureChecksBufSize;
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength;
+ IMG_UINT32 * pui32RGXFWAlignChecks;
+ IMG_UINT32 ui32DeviceFlags;
+ RGXFWIF_COMPCHECKS_BVNC sClientBVNC;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITGUEST;
+
+/* Bridge out structure for RGXInitGuest */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITGUEST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITGUEST;
+
+
+/*******************************************
+ RGXInitFirmwareExtended
+ *******************************************/
+
+/* Bridge in structure for RGXInitFirmwareExtended */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITFIRMWAREEXTENDED_TAG
+{
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength;
+ IMG_UINT32 * pui32RGXFWAlignChecks;
+ RGX_FW_INIT_IN_PARAMS spsInParams;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITFIRMWAREEXTENDED;
+
+/* Bridge out structure for RGXInitFirmwareExtended */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITFIRMWAREEXTENDED_TAG
+{
+ RGXFWIF_DEV_VIRTADDR spsRGXFwInit;
+ IMG_HANDLE hHWPerfPMR2;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITFIRMWAREEXTENDED;
+
+
+#endif /* COMMON_RGXINIT_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2)
+
+
+/*******************************************
+ RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+
+/*******************************************
+ RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+
+/*******************************************
+ RGXKickSync
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32FenceSyncOffset;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32UpdateSyncOffset;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSync;
+ IMG_INT32 i32CheckFenceFD;
+ IMG_INT32 i32TimelineFenceFD;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC;
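+/* Note: the pointer members above are client buffers; the server-side stub
+ * copies each array into kernel memory with OSCopyFromUser() before use.
+ */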
+
+/* Bridge out structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC_TAG
+{
+ IMG_INT32 i32UpdateFenceFD;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC;
+
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateKickSyncContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateKickSyncContext_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateKickSyncContextOUT->eError =
+ PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ &psKickSyncContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateKickSyncContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+	psRGXCreateKickSyncContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateKickSyncContextOUT->hKickSyncContext,
+							(void *) psKickSyncContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyKickSyncContextKM);
+ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateKickSyncContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateKickSyncContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ if (psKickSyncContextInt)
+ {
+ PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+ psRGXDestroyKickSyncContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyKickSyncContextIN->hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+ if ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyKickSyncContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyKickSyncContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyKickSyncContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+RGXDestroyKickSyncContext_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickSync(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKSYNC *psRGXKickSyncIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKSYNC *psRGXKickSyncOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hKickSyncContext = psRGXKickSyncIN->hKickSyncContext;
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
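+	/* Scratch space needed for the fence, update and server-sync arrays plus
+	 * the 32-character update fence name; each array is carved out of this
+	 * buffer at ui32NextOffset below.
+	 */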
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickSyncIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickSyncIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
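+			/* The arguments do not fit in the bridge input buffer, so take a
+			 * temporary allocation that is freed again at RGXKickSync_exit.
+			 */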
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, psRGXKickSyncIN->phFenceUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, psRGXKickSyncIN->pui32FenceSyncOffset, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, psRGXKickSyncIN->pui32FenceValue, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, psRGXKickSyncIN->phUpdateUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, psRGXKickSyncIN->pui32UpdateSyncOffset, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, psRGXKickSyncIN->pui32UpdateValue, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickSyncIN->pui32ServerSyncFlags, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, psRGXKickSyncIN->phServerSync, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickSyncIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psKickSyncContextInt,
+ hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+
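+	/* Resolve each client sync-block handle to its kernel object; the
+	 * references taken here are released again at RGXKickSync_exit.
+	 */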
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i],
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i],
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickSyncOUT->eError =
+ PVRSRVRGXKickSyncKM(
+ psKickSyncContextInt,
+ psRGXKickSyncIN->ui32ClientCacheOpSeqNum,
+ psRGXKickSyncIN->ui32ClientFenceCount,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ psRGXKickSyncIN->ui32ClientUpdateCount,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ psRGXKickSyncIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXKickSyncIN->i32CheckFenceFD,
+ psRGXKickSyncIN->i32TimelineFenceFD,
+ &psRGXKickSyncOUT->i32UpdateFenceFD,
+ uiUpdateFenceNameInt,
+ psRGXKickSyncIN->ui32ExtJobRef);
+
+
+
+
+RGXKickSync_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psKickSyncContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+				/* The array may be NULL if we bailed out before it was allocated */
+				if(psFenceUFOSyncPrimBlockInt && psFenceUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+				if(psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+				if(psServerSyncInt && psServerSyncInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
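+/* When bUseLock is set, each entry registered below is expected to be
+ * dispatched with the bridge lock held.
+ */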
+
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, PVRSRVBridgeRGXCreateKickSyncContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, PVRSRVBridgeRGXDestroyKickSyncContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC, PVRSRVBridgeRGXKickSync,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXKICKSYNC functions from services
+ */
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RGXPDUMP_BRIDGE_H
+#define CLIENT_RGXPDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxpdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_RGXPDUMP_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVPDumpTraceBufferKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					 ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVPDumpSignatureBufferKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					     ui32PDumpFlags);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1)
+
+
+/*******************************************
+ PDumpTraceBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+
+/*******************************************
+ PDumpSignatureBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN,
+ PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPDumpTraceBufferOUT->eError =
+ PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevData(psConnection),
+ psPDumpTraceBufferIN->ui32PDumpFlags);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN,
+ PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPDumpSignatureBufferOUT->eError =
+ PVRSRVPDumpSignatureBufferKM(psConnection, OSGetDevData(psConnection),
+ psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, PVRSRVBridgePDumpTraceBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, PVRSRVBridgePDumpSignatureBuffer,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXPDUMP functions from services
+ */
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXRAY_BRIDGE_H
+#define COMMON_RGXRAY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_devmem.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_RGXRAY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKRS PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXRAY_CMD_LAST (PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8)
+
+
+/*******************************************
+ RGXCreateRPMFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST_TAG
+{
+ IMG_HANDLE hRPMContext;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_DEV_VIRTADDR sFreeListDevVAddr;
+ IMG_BOOL bIsExternal;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST;
+
+/* Bridge out structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_UINT32 ui32HWFreeList;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST;
+
+
+/*******************************************
+ RGXDestroyRPMFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST;
+
+/* Bridge out structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST;
+
+
+/*******************************************
+ RGXCreateRPMContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT_TAG
+{
+ IMG_UINT32 ui32TotalRPMPages;
+ IMG_UINT32 ui32Log2DopplerPageSize;
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr;
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr;
+ IMG_HANDLE hSceneHeap;
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr;
+ IMG_HANDLE hRPMPageTableHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT;
+
+/* Bridge out structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_HANDLE hHWMemDesc;
+ IMG_UINT32 ui32HWFrameData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRPMContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT;
+
+/* Bridge out structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT;
+
+
+/*******************************************
+ RGXKickRS
+ *******************************************/
+
+/* Bridge in structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKRS_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceSyncOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32FCCmdSize;
+ IMG_BYTE * psFCDMCmd;
+ IMG_UINT32 ui32FrameContext;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKRS;
+
+/* Bridge out structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKRS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKRS;
+
+
+/*******************************************
+ RGXKickVRDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKVRDM_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceSyncOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKVRDM;
+
+/* Bridge out structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKVRDM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKVRDM;
+
+
+/*******************************************
+ RGXCreateRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sMCUFenceAddr;
+ IMG_DEV_VIRTADDR sVRMCallStackAddr;
+ IMG_UINT32 ui32FrameworkCmdSize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT;
+
+/* Bridge out structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT_TAG
+{
+ IMG_HANDLE hRayContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT_TAG
+{
+ IMG_HANDLE hRayContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT;
+
+/* Bridge out structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT;
+
+
+/*******************************************
+ RGXSetRayContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXRAY_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxray.h"
+#include "devicemem_server.h"
+
+
+#include "common_rgxray_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
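+
+/*
+ * Each handler below follows the same generated pattern: confirm that the
+ * device actually exposes the ray tracing feature, copy any variable-length
+ * array arguments in from user space, look up client handles (taking a
+ * reference) under the handle lock, call the underlying server function,
+ * and finally drop the references and any temporary buffer in the _exit
+ * path.
+ */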
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRPMContext = psRGXCreateRPMFreeListIN->hRPMContext;
+ RGX_SERVER_RPM_CONTEXT * psRPMContextInt = NULL;
+ RGX_RPM_FREELIST * psCleanupCookieInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXCreateRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRPMFreeList_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
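+ /* The final IMG_TRUE keeps a reference on the looked-up object for the
+ * duration of the call; the exit path below drops it again with
+ * PVRSRVReleaseHandleUnlocked. */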
+ {
+ /* Look up the address from the handle */
+ psRGXCreateRPMFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRPMContextInt,
+ hRPMContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMFreeList_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXCreateRPMFreeListOUT->eError =
+ RGXCreateRPMFreeList(psConnection, OSGetDevData(psConnection),
+ psRPMContextInt,
+ psRGXCreateRPMFreeListIN->ui32InitFLPages,
+ psRGXCreateRPMFreeListIN->ui32GrowFLPages,
+ psRGXCreateRPMFreeListIN->sFreeListDevVAddr,
+ &psCleanupCookieInt,
+ &psRGXCreateRPMFreeListOUT->ui32HWFreeList,
+ psRGXCreateRPMFreeListIN->bIsExternal);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRPMFreeList_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRPMFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateRPMFreeListOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyRPMFreeList);
+ if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMFreeList_exit;
+ }
+
+ /* Release the lock now that the handles have been created. */
+ UnlockHandle();
+
+
+
+RGXCreateRPMFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRPMContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRPMContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
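+ /* If something failed after the free list was created but before a
+ * handle could be allocated for it, no handle owns it yet, so destroy
+ * it directly here. */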
+ if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyRPMFreeList(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXDestroyRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRPMFreeList_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRPMFreeListOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRPMFreeListIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST);
+ if ((psRGXDestroyRPMFreeListOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRPMFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRPMFreeList: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRPMFreeListOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyRPMFreeList_exit;
+ }
+
+ /* Release the lock now that the handle has been destroyed. */
+ UnlockHandle();
+
+
+
+RGXDestroyRPMFreeList_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT *psRGXCreateRPMContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT *psRGXCreateRPMContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_SERVER_RPM_CONTEXT * psCleanupCookieInt = NULL;
+ IMG_HANDLE hSceneHeap = psRGXCreateRPMContextIN->hSceneHeap;
+ DEVMEMINT_HEAP * psSceneHeapInt = NULL;
+ IMG_HANDLE hRPMPageTableHeap = psRGXCreateRPMContextIN->hRPMPageTableHeap;
+ DEVMEMINT_HEAP * psRPMPageTableHeapInt = NULL;
+ DEVMEM_MEMDESC * psHWMemDescInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXCreateRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRPMContext_exit;
+ }
+ }
+
+
+
+ psRGXCreateRPMContextOUT->hCleanupCookie = NULL;
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateRPMContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSceneHeapInt,
+ hSceneHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateRPMContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRPMPageTableHeapInt,
+ hRPMPageTableHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXCreateRPMContextOUT->eError =
+ RGXCreateRPMContext(psConnection, OSGetDevData(psConnection),
+ &psCleanupCookieInt,
+ psRGXCreateRPMContextIN->ui32TotalRPMPages,
+ psRGXCreateRPMContextIN->ui32Log2DopplerPageSize,
+ psRGXCreateRPMContextIN->sSceneMemoryBaseAddr,
+ psRGXCreateRPMContextIN->sDopplerHeapBaseAddr,
+ psSceneHeapInt,
+ psRGXCreateRPMContextIN->sRPMPageTableBaseAddr,
+ psRPMPageTableHeapInt,
+ &psHWMemDescInt,
+ &psRGXCreateRPMContextOUT->ui32HWFrameData);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRPMContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRPMContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateRPMContextOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyRPMContext);
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+
+
+
+
+
+
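+ /* hHWMemDesc is allocated as a sub-handle so that its lifetime is tied
+ * to the parent cleanup-cookie handle created above. */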
+ psRGXCreateRPMContextOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateRPMContextOUT->hHWMemDesc,
+ (void *) psHWMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psRGXCreateRPMContextOUT->hCleanupCookie);
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+
+ /* Release the lock now that the handles have been created. */
+ UnlockHandle();
+
+
+
+RGXCreateRPMContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSceneHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSceneHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRPMPageTableHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRPMPageTableHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psRGXCreateRPMContextOUT->hCleanupCookie)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXCreateRPMContextOUT->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXCreateRPMContext: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psCleanupCookieInt = NULL;
+ }
+
+
+ /* Release the lock now that the creation-time handles have been cleaned up. */
+ UnlockHandle();
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyRPMContext(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXDestroyRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRPMContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRPMContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRPMContextIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ if ((psRGXDestroyRPMContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRPMContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRPMContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRPMContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyRPMContext_exit;
+ }
+
+ /* Release the lock now that the handle has been destroyed. */
+ UnlockHandle();
+
+
+
+RGXDestroyRPMContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickRS(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKRS *psRGXKickRSIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKRS *psRGXKickRSOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXKickRSIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+ IMG_BYTE *psFCDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
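+ /* Total scratch space needed for kernel copies of every variable-length
+ * array passed with this kick; the arrays are laid out back to back and
+ * carved out of pArrayArgsBuffer via ui32NextOffset below. */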
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickRS_exit;
+ }
+ }
+
+
+
+
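+ /* All array arguments share one scratch buffer: either spare space at
+ * the end of the fixed-size bridge input buffer (checked below) or,
+ * failing that, a temporary heap allocation that is freed again in the
+ * exit path. */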
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickRSIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickRSIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+
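+ /* Carve a slice out of pArrayArgsBuffer for each array in turn,
+ * advancing ui32NextOffset, then copy the corresponding user-mode data
+ * into it. */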
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickRSIN->phClientFenceUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, psRGXKickRSIN->pui32ClientFenceSyncOffset, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickRSIN->pui32ClientFenceValue, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickRSIN->phClientUpdateUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, psRGXKickRSIN->pui32ClientUpdateSyncOffset, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickRSIN->pui32ClientUpdateValue, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickRSIN->pui32ServerSyncFlags, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickRSIN->phServerSyncs, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickRSIN->psDMCmd, psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32FCCmdSize != 0)
+ {
+ psFCDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFCDMCmdInt, psRGXKickRSIN->psFCDMCmd, psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXKickRSOUT->eError =
+ PVRSRVRGXKickRSKM(
+ psRayContextInt,
+ psRGXKickRSIN->ui32ClientCacheOpSeqNum,
+ psRGXKickRSIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceSyncOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickRSIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateSyncOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickRSIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickRSIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickRSIN->ui32FCCmdSize,
+ psFCDMCmdInt,
+ psRGXKickRSIN->ui32FrameContext,
+ psRGXKickRSIN->ui32PDumpFlags,
+ psRGXKickRSIN->ui32ExtJobRef);
+
+
+
+
+RGXKickRS_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientFenceUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickVRDM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKVRDM *psRGXKickVRDMIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKVRDM *psRGXKickVRDMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXKickVRDMIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickVRDMIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickVRDMIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickVRDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, psRGXKickVRDMIN->pui32ClientFenceSyncOffset, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickVRDMIN->pui32ClientFenceValue, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickVRDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, psRGXKickVRDMIN->pui32ClientUpdateSyncOffset, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickVRDMIN->pui32ClientUpdateValue, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickVRDMIN->pui32ServerSyncFlags, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickVRDMIN->phServerSyncs, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickVRDMIN->psDMCmd, psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXKickVRDMOUT->eError =
+ PVRSRVRGXKickVRDMKM(
+ psRayContextInt,
+ psRGXKickVRDMIN->ui32ClientCacheOpSeqNum,
+ psRGXKickVRDMIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceSyncOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickVRDMIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateSyncOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickVRDMIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickVRDMIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickVRDMIN->ui32PDumpFlags,
+ psRGXKickVRDMIN->ui32ExtJobRef);
+
+
+
+
+RGXKickVRDM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientFenceUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *psRGXCreateRayContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *psRGXCreateRayContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateRayContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRayContext_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRayContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateRayContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateRayContextIN->ui32FrameworkCmdSize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateRayContextIN->psFrameworkCmd, psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRayContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateRayContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRayContext_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXCreateRayContextOUT->eError =
+ PVRSRVRGXCreateRayContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRayContextIN->ui32Priority,
+ psRGXCreateRayContextIN->sMCUFenceAddr,
+ psRGXCreateRayContextIN->sVRMCallStackAddr,
+ psRGXCreateRayContextIN->ui32FrameworkCmdSize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psRayContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRayContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
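+ /* Unlike the allocations above, the ray context handle is created with
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI, which (as the name suggests) allows
+ * more than one handle to refer to the same object. */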
+ psRGXCreateRayContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateRayContextOUT->hRayContext,
+ (void *) psRayContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRayContextKM);
+ if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRayContext_exit;
+ }
+
+ /* Release the lock now that the handles have been created. */
+ UnlockHandle();
+
+
+
+RGXCreateRayContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
+ if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ if (psRayContextInt)
+ {
+ PVRSRVRGXDestroyRayContextKM(psRayContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRayContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXDestroyRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRayContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRayContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRayContextIN->hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ if ((psRGXDestroyRayContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRayContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRayContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyRayContext_exit;
+ }
+
+ /* Release the lock now that the handle has been destroyed. */
+ UnlockHandle();
+
+
+
+RGXDestroyRayContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRayContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXSetRayContextPriorityIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+ {
+ psRGXSetRayContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetRayContextPriority_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXSetRayContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetRayContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetRayContextPriority_exit;
+ }
+ }
+ /* Release the lock now that the handles have been looked up. */
+ UnlockHandle();
+
+ psRGXSetRayContextPriorityOUT->eError =
+ PVRSRVRGXSetRayContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRayContextInt,
+ psRGXSetRayContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRayContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+ }
+ /* Release the lock now that the looked-up handles have been unreferenced. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
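+
+/*
+ * Each entry below binds one PVRSRV_BRIDGE_RGXRAY_* command index to its
+ * handler in the services dispatch table; bUseLock asks the dispatcher to
+ * serialise these calls with the global bridge lock.
+ */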
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+
+/*
+ * Register all RGXRAY functions with services
+ */
+PVRSRV_ERROR InitRGXRAYBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST, PVRSRVBridgeRGXCreateRPMFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST, PVRSRVBridgeRGXDestroyRPMFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT, PVRSRVBridgeRGXCreateRPMContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT, PVRSRVBridgeRGXDestroyRPMContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRS, PVRSRVBridgeRGXKickRS,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM, PVRSRVBridgeRGXKickVRDM,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT, PVRSRVBridgeRGXCreateRayContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT, PVRSRVBridgeRGXDestroyRayContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY, PVRSRVBridgeRGXSetRayContextPriority,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxray functions with services
+ */
+PVRSRV_ERROR DeinitRGXRAYBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxsignals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxsignals
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXSIGNALS_BRIDGE_H
+#define COMMON_RGXSIGNALS_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
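+/* Command indices used to register this group's entries in the bridge
+ * dispatch table. */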
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST (PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0)
+
+
+/*******************************************
+ RGXNotifySignalUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG
+{
+ IMG_HANDLE hPrivData;
+ IMG_DEV_VIRTADDR sDevSignalAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE;
+
+/* Bridge out structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE;
+
+
+#endif /* COMMON_RGXSIGNALS_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxsignals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxsignals
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxsignals.h"
+
+
+#include "common_rgxsignals_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+ {
+ psRGXNotifySignalUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXNotifySignalUpdate_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXNotifySignalUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXNotifySignalUpdate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXNotifySignalUpdateOUT->eError =
+ PVRSRVRGXNotifySignalUpdateKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ psRGXNotifySignalUpdateIN->sDevSignalAddress);
+
+
+
+
+RGXNotifySignalUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
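+/* bUseLock is passed to SetDispatchTableEntry() below; it tells the dispatcher whether to serialise calls to this entry under the bridge lock. */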
+
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+
+/*
+ * Register all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR InitRGXSIGNALSBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, PVRSRVBridgeRGXNotifySignalUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxsignals functions with services
+ */
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "rgx_fwif_shared.h"
+
+
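+/* Offsets of the rgxta3d calls within this group's section of the bridge dispatch table. */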
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17)
+
+
+/*******************************************
+ RGXCreateHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA_TAG
+{
+ IMG_UINT32 ui32RenderTarget;
+ IMG_DEV_VIRTADDR sPMMlistDevVAddr;
+ IMG_DEV_VIRTADDR sVFPPageTableAddr;
+ IMG_HANDLE * phapsFreeLists;
+ IMG_UINT32 ui32PPPScreen;
+ IMG_UINT32 ui32PPPGridOffset;
+ IMG_UINT64 ui64PPPMultiSampleCtl;
+ IMG_UINT32 ui32TPCStride;
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32MTileStride;
+ IMG_UINT32 ui32ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ui32ISPMergeUpperY;
+ IMG_UINT32 ui32ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ui32ISPMergeScaleY;
+ IMG_UINT16 ui16MaxRTs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA;
+
+/* Bridge out structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_HANDLE hRTACtlMemDesc;
+ IMG_HANDLE hsHWRTDataMemDesc;
+ IMG_UINT32 ui32FWHWRTData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA;
+
+
+/*******************************************
+ RGXDestroyHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA;
+
+/* Bridge out structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA;
+
+
+/*******************************************
+ RGXCreateRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET_TAG
+{
+ IMG_DEV_VIRTADDR spsVHeapTableDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET;
+
+/* Bridge out structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET_TAG
+{
+ IMG_HANDLE hsRenderTargetMemDesc;
+ IMG_UINT32 ui32sRenderTargetFWDevVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET;
+
+
+/*******************************************
+ RGXDestroyRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET_TAG
+{
+ IMG_HANDLE hsRenderTargetMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET;
+
+/* Bridge out structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET;
+
+
+/*******************************************
+ RGXCreateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+ IMG_UINT32 ui32sZSBufferFWDevVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+
+/*******************************************
+ RGXDestroyZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+
+/*******************************************
+ RGXPopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+
+/*******************************************
+ RGXUnpopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+
+/*******************************************
+ RGXCreateFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+ IMG_UINT32 ui32ui32MaxFLPages;
+ IMG_UINT32 ui32ui32InitFLPages;
+ IMG_UINT32 ui32ui32GrowFLPages;
+ IMG_HANDLE hsGlobalFreeList;
+ IMG_BOOL bbFreeListCheck;
+ IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+ IMG_HANDLE hsFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiPMROffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+
+/*******************************************
+ RGXDestroyFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+
+/*******************************************
+ RGXAddBlockToFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST_TAG
+{
+ IMG_HANDLE hsFreeList;
+ IMG_UINT32 ui3232NumPages;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST;
+
+/* Bridge out structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST;
+
+
+/*******************************************
+ RGXRemoveBlockFromFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+ IMG_HANDLE hsFreeList;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST;
+
+/* Bridge out structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST;
+
+
+/*******************************************
+ RGXCreateRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sMCUFenceAddr;
+ IMG_DEV_VIRTADDR sVDMCallStackAddr;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_HANDLE hRenderContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+
+/*******************************************
+ RGXKickTA3D
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D_TAG
+{
+ IMG_HANDLE hRenderContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientTAFenceCount;
+ IMG_HANDLE * phClientTAFenceSyncPrimBlock;
+ IMG_UINT32 * pui32ClientTAFenceSyncOffset;
+ IMG_UINT32 * pui32ClientTAFenceValue;
+ IMG_UINT32 ui32ClientTAUpdateCount;
+ IMG_HANDLE * phClientTAUpdateSyncPrimBlock;
+ IMG_UINT32 * pui32ClientTAUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientTAUpdateValue;
+ IMG_UINT32 ui32ServerTASyncPrims;
+ IMG_UINT32 * pui32ServerTASyncFlags;
+ IMG_HANDLE * phServerTASyncs;
+ IMG_UINT32 ui32Client3DFenceCount;
+ IMG_HANDLE * phClient3DFenceSyncPrimBlock;
+ IMG_UINT32 * pui32Client3DFenceSyncOffset;
+ IMG_UINT32 * pui32Client3DFenceValue;
+ IMG_UINT32 ui32Client3DUpdateCount;
+ IMG_HANDLE * phClient3DUpdateSyncPrimBlock;
+ IMG_UINT32 * pui32Client3DUpdateSyncOffset;
+ IMG_UINT32 * pui32Client3DUpdateValue;
+ IMG_UINT32 ui32Server3DSyncPrims;
+ IMG_UINT32 * pui32Server3DSyncFlags;
+ IMG_HANDLE * phServer3DSyncs;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+ IMG_UINT32 ui32FRFenceUFOSyncOffset;
+ IMG_UINT32 ui32FRFenceValue;
+ IMG_INT32 i32CheckFenceFD;
+ IMG_INT32 i32UpdateTimelineFD;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32TACmdSize;
+ IMG_BYTE * psTACmd;
+ IMG_UINT32 ui323DPRCmdSize;
+ IMG_BYTE * ps3DPRCmd;
+ IMG_UINT32 ui323DCmdSize;
+ IMG_BYTE * ps3DCmd;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_BOOL bbLastTAInScene;
+ IMG_BOOL bbKickTA;
+ IMG_BOOL bbKickPR;
+ IMG_BOOL bbKick3D;
+ IMG_BOOL bbAbort;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_HANDLE hRTDataCleanup;
+ IMG_HANDLE hZBuffer;
+ IMG_HANDLE hSBuffer;
+ IMG_BOOL bbCommitRefCountsTA;
+ IMG_BOOL bbCommitRefCounts3D;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+ IMG_UINT32 ui32RenderTargetSize;
+ IMG_UINT32 ui32NumberOfDrawCalls;
+ IMG_UINT32 ui32NumberOfIndices;
+ IMG_UINT32 ui32NumberOfMRTs;
+ IMG_UINT64 ui64Deadline;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D;
+
+/* Bridge out structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D_TAG
+{
+ IMG_INT32 i32UpdateFenceFD;
+ IMG_BOOL bbCommittedRefCountsTA;
+ IMG_BOOL bbCommittedRefCounts3D;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D;
+
+
+/*******************************************
+ RGXSetRenderContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hRenderContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXGetLastRenderContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+ IMG_HANDLE hRenderContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+ IMG_UINT32 ui32LastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+
+/*******************************************
+ RGXGetPartialRenderCount
+ *******************************************/
+
+/* Bridge in structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT_TAG
+{
+ IMG_HANDLE hHWRTDataMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT;
+
+/* Bridge out structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT_TAG
+{
+ IMG_UINT32 ui32NumPartialRenders;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT;
+
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA *psRGXCreateHWRTDataIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA *psRGXCreateHWRTDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_FREELIST * *psapsFreeListsInt = NULL;
+ IMG_HANDLE *hapsFreeListsInt2 = NULL;
+ RGX_RTDATA_CLEANUP_DATA * psCleanupCookieInt = NULL;
+ DEVMEM_MEMDESC * psRTACtlMemDescInt = NULL;
+ DEVMEM_MEMDESC * pssHWRTDataMemDescInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) +
+ (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) +
+ 0;
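+ /* The staging buffer is carved up below via ui32NextOffset: a fixed-size array of looked-up RGX_FREELIST pointers followed by an array of the raw handles copied from user space. */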
+
+
+
+
+ psRGXCreateHWRTDataOUT->hCleanupCookie = NULL;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateHWRTDataIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateHWRTDataIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateHWRTData_exit;
+ }
+ }
+ }
+
+
+ {
+ psapsFreeListsInt = (RGX_FREELIST **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *);
+ hapsFreeListsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hapsFreeListsInt2, psRGXCreateHWRTDataIN->phapsFreeLists, RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTData_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXCreateHWRTDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psapsFreeListsInt[i],
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateHWRTDataOUT->eError =
+ RGXCreateHWRTData(psConnection, OSGetDevData(psConnection),
+ psRGXCreateHWRTDataIN->ui32RenderTarget,
+ psRGXCreateHWRTDataIN->sPMMlistDevVAddr,
+ psRGXCreateHWRTDataIN->sVFPPageTableAddr,
+ psapsFreeListsInt,
+ &psCleanupCookieInt,
+ &psRTACtlMemDescInt,
+ psRGXCreateHWRTDataIN->ui32PPPScreen,
+ psRGXCreateHWRTDataIN->ui32PPPGridOffset,
+ psRGXCreateHWRTDataIN->ui64PPPMultiSampleCtl,
+ psRGXCreateHWRTDataIN->ui32TPCStride,
+ psRGXCreateHWRTDataIN->sTailPtrsDevVAddr,
+ psRGXCreateHWRTDataIN->ui32TPCSize,
+ psRGXCreateHWRTDataIN->ui32TEScreen,
+ psRGXCreateHWRTDataIN->ui32TEAA,
+ psRGXCreateHWRTDataIN->ui32TEMTILE1,
+ psRGXCreateHWRTDataIN->ui32TEMTILE2,
+ psRGXCreateHWRTDataIN->ui32MTileStride,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerY,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperY,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleY,
+ psRGXCreateHWRTDataIN->ui16MaxRTs,
+ &pssHWRTDataMemDescInt,
+ &psRGXCreateHWRTDataOUT->ui32FWHWRTData);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateHWRTData_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyHWRTData);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+
+
+
+
+
+
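+ /* The two firmware memory descriptor handles are created as sub-handles of the cleanup cookie, so releasing the cookie releases them as well. */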
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hRTACtlMemDesc,
+ (void *) psRTACtlMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psRGXCreateHWRTDataOUT->hCleanupCookie);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+
+
+
+
+
+
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hsHWRTDataMemDesc,
+ (void *) pssHWRTDataMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psRGXCreateHWRTDataOUT->hCleanupCookie);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateHWRTData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psapsFreeListsInt && psapsFreeListsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psRGXCreateHWRTDataOUT->hCleanupCookie)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXCreateHWRTDataOUT->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXCreateHWRTData: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psCleanupCookieInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyHWRTData(psCleanupCookieInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
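+ /* On INTEGRITY the in-buffer reuse path is compiled out, so any staging buffer was heap-allocated and must always be freed; otherwise only the fallback allocation is freed. */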
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyHWRTDataOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyHWRTDataIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ if ((psRGXDestroyHWRTDataOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyHWRTDataOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyHWRTData: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyHWRTDataOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyHWRTData_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyHWRTData_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET *psRGXCreateRenderTargetIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET *psRGXCreateRenderTargetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_RT_CLEANUP_DATA * pssRenderTargetMemDescInt = NULL;
+
+ psRGXCreateRenderTargetOUT->eError =
+ RGXCreateRenderTarget(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRenderTargetIN->spsVHeapTableDevVAddr,
+ &pssRenderTargetMemDescInt,
+ &psRGXCreateRenderTargetOUT->ui32sRenderTargetFWDevVAddr);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRenderTarget_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRenderTargetOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateRenderTargetOUT->hsRenderTargetMemDesc,
+ (void *) pssRenderTargetMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyRenderTarget);
+ if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderTarget_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRenderTarget_exit:
+
+
+
+ if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ if (pssRenderTargetMemDescInt)
+ {
+ RGXDestroyRenderTarget(pssRenderTargetMemDescInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRenderTargetOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRenderTargetIN->hsRenderTargetMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET);
+ if ((psRGXDestroyRenderTargetOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRenderTargetOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRenderTarget: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRenderTargetOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyRenderTarget_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRenderTarget_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+ PMR * psPMRInt = NULL;
+ RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateZSBufferOUT->eError =
+ RGXCreateZSBufferKM(psConnection, OSGetDevData(psConnection),
+ psReservationInt,
+ psPMRInt,
+ psRGXCreateZSBufferIN->uiMapFlags,
+ &pssZSBufferKMInt,
+ &psRGXCreateZSBufferOUT->ui32sZSBufferFWDevVAddr);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateZSBufferOUT->hsZSBufferKM,
+ (void *) pssZSBufferKMInt,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyZSBufferKM);
+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssZSBufferKMInt)
+ {
+ RGXDestroyZSBufferKM(pssZSBufferKMInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyZSBufferOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyZSBufferIN->hsZSBufferMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ if ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyZSBuffer: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyZSBufferOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyZSBuffer_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+ RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+ RGX_POPULATION * pssPopulationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXPopulateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssZSBufferKMInt,
+ hsZSBufferKM,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXPopulateZSBuffer_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXPopulateZSBufferOUT->eError =
+ RGXPopulateZSBufferKM(
+ pssZSBufferKMInt,
+ &pssPopulationInt);
+ /* Exit early if bridged call fails */
+ if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXPopulateZSBufferOUT->hsPopulation,
+ (void *) pssPopulationInt,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXUnpopulateZSBufferKM);
+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXPopulateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(pssZSBufferKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsZSBufferKM,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssPopulationInt)
+ {
+ RGXUnpopulateZSBufferKM(pssPopulationInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXUnpopulateZSBufferOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+ if ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) &&
+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXUnpopulateZSBuffer: %s",
+ PVRSRVGetErrorStringKM(psRGXUnpopulateZSBufferOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXUnpopulateZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXUnpopulateZSBuffer_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+ RGX_FREELIST * pssGlobalFreeListInt = NULL;
+ IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+ PMR * pssFreeListPMRInt = NULL;
+ RGX_FREELIST * psCleanupCookieInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
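+ /* The global free list handle is optional: it is only looked up here, and only released in the exit path, when the caller supplied one. */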
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssGlobalFreeListInt,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssFreeListPMRInt,
+ hsFreeListPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateFreeListOUT->eError =
+ RGXCreateFreeList(psConnection, OSGetDevData(psConnection),
+ psRGXCreateFreeListIN->ui32ui32MaxFLPages,
+ psRGXCreateFreeListIN->ui32ui32InitFLPages,
+ psRGXCreateFreeListIN->ui32ui32GrowFLPages,
+ pssGlobalFreeListInt,
+ psRGXCreateFreeListIN->bbFreeListCheck,
+ psRGXCreateFreeListIN->spsFreeListDevVAddr,
+ pssFreeListPMRInt,
+ psRGXCreateFreeListIN->uiPMROffset,
+ &psCleanupCookieInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateFreeListOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyFreeList);
+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+ /* Unreference the previously looked up handle */
+ if(pssGlobalFreeListInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(pssFreeListPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeListPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyFreeList(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyFreeListOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ if ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyFreeList: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyFreeListOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyFreeList_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyFreeList_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXAddBlockToFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsFreeList = psRGXAddBlockToFreeListIN->hsFreeList;
+ RGX_FREELIST * pssFreeListInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXAddBlockToFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssFreeListInt,
+ hsFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXAddBlockToFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXAddBlockToFreeList_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXAddBlockToFreeListOUT->eError =
+ RGXAddBlockToFreeListKM(
+ pssFreeListInt,
+ psRGXAddBlockToFreeListIN->ui3232NumPages);
+
+
+
+
+RGXAddBlockToFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(pssFreeListInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXRemoveBlockFromFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsFreeList = psRGXRemoveBlockFromFreeListIN->hsFreeList;
+ RGX_FREELIST * pssFreeListInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXRemoveBlockFromFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssFreeListInt,
+ hsFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXRemoveBlockFromFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXRemoveBlockFromFreeList_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXRemoveBlockFromFreeListOUT->eError =
+ RGXRemoveBlockFromFreeListKM(
+ pssFreeListInt);
+
+
+
+
+RGXRemoveBlockFromFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(pssFreeListInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
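+ /* The framework command blob is the only variable-length input to this call, so it is the only item staged in the argument buffer. */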
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRenderContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateRenderContextIN->psFrameworkCmd, psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRenderContextIN->ui32Priority,
+ psRGXCreateRenderContextIN->sMCUFenceAddr,
+ psRGXCreateRenderContextIN->sVDMCallStackAddr,
+ psRGXCreateRenderContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psRenderContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateRenderContextOUT->hRenderContext,
+ (void *) psRenderContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRenderContextKM);
+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRenderContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ if (psRenderContextInt)
+ {
+ PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRenderContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRenderContextIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ if ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRenderContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRenderContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyRenderContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRenderContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKTA3D *psRGXKickTA3DIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKTA3D *psRGXKickTA3DOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXKickTA3DIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientTAFenceSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientTAUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerTASyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerTASyncsInt = NULL;
+ IMG_HANDLE *hServerTASyncsInt2 = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClient3DFenceSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClient3DFenceSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32Client3DFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32Client3DFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+ IMG_UINT32 *ui32Server3DSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServer3DSyncsInt = NULL;
+ IMG_HANDLE *hServer3DSyncsInt2 = NULL;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3DIN->hPRFenceUFOSyncPrimBlock;
+ SYNC_PRIMITIVE_BLOCK * psPRFenceUFOSyncPrimBlockInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *psTACmdInt = NULL;
+ IMG_BYTE *ps3DPRCmdInt = NULL;
+ IMG_BYTE *ps3DCmdInt = NULL;
+ IMG_HANDLE hRTDataCleanup = psRGXKickTA3DIN->hRTDataCleanup;
+ RGX_RTDATA_CLEANUP_DATA * psRTDataCleanupInt = NULL;
+ IMG_HANDLE hZBuffer = psRGXKickTA3DIN->hZBuffer;
+ RGX_ZSBUFFER_DATA * psZBufferInt = NULL;
+ IMG_HANDLE hSBuffer = psRGXKickTA3DIN->hSBuffer;
+ RGX_ZSBUFFER_DATA * psSBufferInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
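+	/* Total scratch space needed to stage all user-supplied arrays (sync blocks, offsets, values and command buffers) in one allocation. */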
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
+
+
+
+
+
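+	/* Stage the arrays in the unused tail of the bridge input buffer when it is large enough; otherwise fall back to a temporary allocation that is freed on exit. */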
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickTA3DIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickTA3DIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ psClientTAFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientTAFenceSyncPrimBlockInt2, psRGXKickTA3DIN->phClientTAFenceSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAFenceSyncOffsetInt, psRGXKickTA3DIN->pui32ClientTAFenceSyncOffset, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAFenceValueInt, psRGXKickTA3DIN->pui32ClientTAFenceValue, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ psClientTAUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientTAUpdateSyncPrimBlockInt2, psRGXKickTA3DIN->phClientTAUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAUpdateSyncOffsetInt, psRGXKickTA3DIN->pui32ClientTAUpdateSyncOffset, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAUpdateValueInt, psRGXKickTA3DIN->pui32ClientTAUpdateValue, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+ {
+ ui32ServerTASyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerTASyncFlagsInt, psRGXKickTA3DIN->pui32ServerTASyncFlags, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+ {
+ psServerTASyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerTASyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerTASyncsInt2, psRGXKickTA3DIN->phServerTASyncs, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ psClient3DFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClient3DFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClient3DFenceSyncPrimBlockInt2, psRGXKickTA3DIN->phClient3DFenceSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ ui32Client3DFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DFenceSyncOffsetInt, psRGXKickTA3DIN->pui32Client3DFenceSyncOffset, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ ui32Client3DFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DFenceValueInt, psRGXKickTA3DIN->pui32Client3DFenceValue, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ psClient3DUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClient3DUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClient3DUpdateSyncPrimBlockInt2, psRGXKickTA3DIN->phClient3DUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DUpdateSyncOffsetInt, psRGXKickTA3DIN->pui32Client3DUpdateSyncOffset, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DUpdateValueInt, psRGXKickTA3DIN->pui32Client3DUpdateValue, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+ {
+ ui32Server3DSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Server3DSyncFlagsInt, psRGXKickTA3DIN->pui32Server3DSyncFlags, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+ {
+ psServer3DSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServer3DSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServer3DSyncsInt2, psRGXKickTA3DIN->phServer3DSyncs, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickTA3DIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32TACmdSize != 0)
+ {
+ psTACmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psTACmdInt, psRGXKickTA3DIN->psTACmd, psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui323DPRCmdSize != 0)
+ {
+ ps3DPRCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ps3DPRCmdInt, psRGXKickTA3DIN->ps3DPRCmd, psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui323DCmdSize != 0)
+ {
+ ps3DCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ps3DCmdInt, psRGXKickTA3DIN->ps3DCmd, psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXKickTA3DIN->pui32SyncPMRFlags, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXKickTA3DIN->phSyncPMRs, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+
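+	/* Resolve each incoming handle to its kernel-side object; every successful lookup is balanced by a release in the RGXKickTA3D_exit cleanup path. */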
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientTAFenceSyncPrimBlockInt[i],
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientTAUpdateSyncPrimBlockInt[i],
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerTASyncsInt[i],
+ hServerTASyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClient3DFenceSyncPrimBlockInt[i],
+ hClient3DFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClient3DUpdateSyncPrimBlockInt[i],
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServer3DSyncsInt[i],
+ hServer3DSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPRFenceUFOSyncPrimBlockInt,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hRTDataCleanup)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRTDataCleanupInt,
+ hRTDataCleanup,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hZBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psZBufferInt,
+ hZBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hSBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSBufferInt,
+ hSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ }
+	/* Release the lock now that we have looked up handles. */
+ UnlockHandle();
+
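+	/* All handles are resolved and all arrays copied in; hand the request over to the server-side kick implementation. */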
+ psRGXKickTA3DOUT->eError =
+ PVRSRVRGXKickTA3DKM(
+ psRenderContextInt,
+ psRGXKickTA3DIN->ui32ClientCacheOpSeqNum,
+ psRGXKickTA3DIN->ui32ClientTAFenceCount,
+ psClientTAFenceSyncPrimBlockInt,
+ ui32ClientTAFenceSyncOffsetInt,
+ ui32ClientTAFenceValueInt,
+ psRGXKickTA3DIN->ui32ClientTAUpdateCount,
+ psClientTAUpdateSyncPrimBlockInt,
+ ui32ClientTAUpdateSyncOffsetInt,
+ ui32ClientTAUpdateValueInt,
+ psRGXKickTA3DIN->ui32ServerTASyncPrims,
+ ui32ServerTASyncFlagsInt,
+ psServerTASyncsInt,
+ psRGXKickTA3DIN->ui32Client3DFenceCount,
+ psClient3DFenceSyncPrimBlockInt,
+ ui32Client3DFenceSyncOffsetInt,
+ ui32Client3DFenceValueInt,
+ psRGXKickTA3DIN->ui32Client3DUpdateCount,
+ psClient3DUpdateSyncPrimBlockInt,
+ ui32Client3DUpdateSyncOffsetInt,
+ ui32Client3DUpdateValueInt,
+ psRGXKickTA3DIN->ui32Server3DSyncPrims,
+ ui32Server3DSyncFlagsInt,
+ psServer3DSyncsInt,
+ psPRFenceUFOSyncPrimBlockInt,
+ psRGXKickTA3DIN->ui32FRFenceUFOSyncOffset,
+ psRGXKickTA3DIN->ui32FRFenceValue,
+ psRGXKickTA3DIN->i32CheckFenceFD,
+ psRGXKickTA3DIN->i32UpdateTimelineFD,
+ &psRGXKickTA3DOUT->i32UpdateFenceFD,
+ uiUpdateFenceNameInt,
+ psRGXKickTA3DIN->ui32TACmdSize,
+ psTACmdInt,
+ psRGXKickTA3DIN->ui323DPRCmdSize,
+ ps3DPRCmdInt,
+ psRGXKickTA3DIN->ui323DCmdSize,
+ ps3DCmdInt,
+ psRGXKickTA3DIN->ui32ExtJobRef,
+ psRGXKickTA3DIN->bbLastTAInScene,
+ psRGXKickTA3DIN->bbKickTA,
+ psRGXKickTA3DIN->bbKickPR,
+ psRGXKickTA3DIN->bbKick3D,
+ psRGXKickTA3DIN->bbAbort,
+ psRGXKickTA3DIN->ui32PDumpFlags,
+ psRTDataCleanupInt,
+ psZBufferInt,
+ psSBufferInt,
+ psRGXKickTA3DIN->bbCommitRefCountsTA,
+ psRGXKickTA3DIN->bbCommitRefCounts3D,
+ &psRGXKickTA3DOUT->bbCommittedRefCountsTA,
+ &psRGXKickTA3DOUT->bbCommittedRefCounts3D,
+ psRGXKickTA3DIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt,
+ psRGXKickTA3DIN->ui32RenderTargetSize,
+ psRGXKickTA3DIN->ui32NumberOfDrawCalls,
+ psRGXKickTA3DIN->ui32NumberOfIndices,
+ psRGXKickTA3DIN->ui32NumberOfMRTs,
+ psRGXKickTA3DIN->ui64Deadline);
+
+
+
+
+RGXKickTA3D_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientTAFenceSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClientTAUpdateSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerTASyncsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerTASyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClient3DFenceSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClient3DFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psClient3DUpdateSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServer3DSyncsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServer3DSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPRFenceUFOSyncPrimBlockInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hRTDataCleanup)
+ {
+ /* Unreference the previously looked up handle */
+ if(psRTDataCleanupInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRTDataCleanup,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hZBuffer)
+ {
+ /* Unreference the previously looked up handle */
+ if(psZBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hZBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hSBuffer)
+ {
+ /* Unreference the previously looked up handle */
+ if(psSBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ }
+	/* Release the lock now that we have cleaned up the lookup handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetRenderContextPriority_exit;
+ }
+ }
+	/* Release the lock now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRenderContextInt,
+ psRGXSetRenderContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRenderContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+ }
+	/* Release the lock now that we have cleaned up the lookup handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonIN,
+ PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXGetLastRenderContextResetReasonIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXGetLastRenderContextResetReasonOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetLastRenderContextResetReason_exit;
+ }
+ }
+	/* Release the lock now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXGetLastRenderContextResetReasonOUT->eError =
+ PVRSRVRGXGetLastRenderContextResetReasonKM(
+ psRenderContextInt,
+ &psRGXGetLastRenderContextResetReasonOUT->ui32LastResetReason,
+ &psRGXGetLastRenderContextResetReasonOUT->ui32LastResetJobRef);
+
+
+
+
+RGXGetLastRenderContextResetReason_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+ }
+	/* Release the lock now that we have cleaned up the lookup handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetPartialRenderCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountIN,
+ PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hHWRTDataMemDesc = psRGXGetPartialRenderCountIN->hHWRTDataMemDesc;
+ DEVMEM_MEMDESC * psHWRTDataMemDescInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXGetPartialRenderCountOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psHWRTDataMemDescInt,
+ hHWRTDataMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ IMG_TRUE);
+ if(psRGXGetPartialRenderCountOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetPartialRenderCount_exit;
+ }
+ }
+	/* Release the lock now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXGetPartialRenderCountOUT->eError =
+ PVRSRVRGXGetPartialRenderCountKM(
+ psHWRTDataMemDescInt,
+ &psRGXGetPartialRenderCountOUT->ui32NumPartialRenders);
+
+
+
+
+RGXGetPartialRenderCount_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psHWRTDataMemDescInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hHWRTDataMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC);
+ }
+ }
+	/* Release the lock now that we have cleaned up the lookup handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
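+/* Every RGXTA3D entry below is registered with bUseLock set, so it is dispatched while the bridge lock is held. */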
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA, PVRSRVBridgeRGXCreateHWRTData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA, PVRSRVBridgeRGXDestroyHWRTData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET, PVRSRVBridgeRGXCreateRenderTarget,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET, PVRSRVBridgeRGXDestroyRenderTarget,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, PVRSRVBridgeRGXCreateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, PVRSRVBridgeRGXDestroyZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, PVRSRVBridgeRGXPopulateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, PVRSRVBridgeRGXUnpopulateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, PVRSRVBridgeRGXCreateFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, PVRSRVBridgeRGXDestroyFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST, PVRSRVBridgeRGXAddBlockToFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST, PVRSRVBridgeRGXRemoveBlockFromFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, PVRSRVBridgeRGXCreateRenderContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, PVRSRVBridgeRGXDestroyRenderContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D, PVRSRVBridgeRGXKickTA3D,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, PVRSRVBridgeRGXSetRenderContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, PVRSRVBridgeRGXGetLastRenderContextResetReason,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT, PVRSRVBridgeRGXGetPartialRenderCount,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTA3D functions from services
+ */
+PVRSRV_ERROR DeinitRGXTA3DBridge(void)
+{
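+	/* Nothing to unregister; the dispatch table entries are simply left in place. */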
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXTDMCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sMCUFenceAddr;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXTDMDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXTDMSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32FenceSyncOffset;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32UpdateSyncOffset;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSync;
+ IMG_INT32 i32CheckFenceFD;
+ IMG_INT32 i32UpdateTimelineFD;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CommandSize;
+ IMG_UINT8 * pui8FWCommand;
+ IMG_UINT32 ui32ExternalJobReference;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER;
+
+/* Bridge out structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER_TAG
+{
+ IMG_INT32 i32UpdateFenceFD;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER;
+
+
+/*******************************************
+ RGXTDMSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXTDMNotifyWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
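+	/* The TDM bridge calls are only available on devices that expose the FASTRENDER_DM feature. */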
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMCreateTransferContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXTDMCreateTransferContextIN->psFrameworkCmd, psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+	/* Release the lock now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXTDMCreateTransferContextIN->ui32Priority,
+ psRGXTDMCreateTransferContextIN->sMCUFenceAddr,
+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psTransferContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psRGXTDMCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXTDMCreateTransferContextOUT->hTransferContext,
+							(void *) psTransferContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVRGXTDMDestroyTransferContextKM);
+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+	/* Release the lock now that we have created handles. */
+ UnlockHandle();
+
+
+
+RGXTDMCreateTransferContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+	/* Release the lock now that we have cleaned up the lookup handles. */
+ UnlockHandle();
+
+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ if (psTransferContextInt)
+ {
+ PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXTDMDestroyTransferContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXTDMDestroyTransferContextIN->hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ if ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+ (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXTDMDestroyTransferContext: %s",
+ PVRSRVGetErrorStringKM(psRGXTDMDestroyTransferContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+
+	/* Release the lock now that we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXTDMDestroyTransferContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMSubmitTransferIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_UINT8 *ui8FWCommandInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
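+	/* Total scratch space needed to stage the user-supplied sync, command and PMR arrays in one allocation. */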
+ IMG_UINT32 ui32BufferSize =
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMSubmitTransferIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMSubmitTransferIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, psRGXTDMSubmitTransferIN->phFenceUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, psRGXTDMSubmitTransferIN->pui32FenceSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, psRGXTDMSubmitTransferIN->pui32FenceValue, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, psRGXTDMSubmitTransferIN->phUpdateUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, psRGXTDMSubmitTransferIN->pui32UpdateSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, psRGXTDMSubmitTransferIN->pui32UpdateValue, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXTDMSubmitTransferIN->pui32ServerSyncFlags, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, psRGXTDMSubmitTransferIN->phServerSync, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXTDMSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32CommandSize != 0)
+ {
+ ui8FWCommandInt = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui8FWCommandInt, psRGXTDMSubmitTransferIN->pui8FWCommand, psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXTDMSubmitTransferIN->pui32SyncPMRFlags, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXTDMSubmitTransferIN->phSyncPMRs, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
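+ /* Look up each user handle under the handle lock; every successful lookup
+ takes a reference that is dropped again in the _exit path below. */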
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i],
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i],
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVRGXTDMSubmitTransferKM(
+ psTransferContextInt,
+ psRGXTDMSubmitTransferIN->ui32PDumpFlags,
+ psRGXTDMSubmitTransferIN->ui32ClientCacheOpSeqNum,
+ psRGXTDMSubmitTransferIN->ui32ClientFenceCount,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ psRGXTDMSubmitTransferIN->ui32ClientUpdateCount,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ psRGXTDMSubmitTransferIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXTDMSubmitTransferIN->i32CheckFenceFD,
+ psRGXTDMSubmitTransferIN->i32UpdateTimelineFD,
+ &psRGXTDMSubmitTransferOUT->i32UpdateFenceFD,
+ uiUpdateFenceNameInt,
+ psRGXTDMSubmitTransferIN->ui32CommandSize,
+ ui8FWCommandInt,
+ psRGXTDMSubmitTransferIN->ui32ExternalJobReference,
+ psRGXTDMSubmitTransferIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt);
+
+
+
+
+RGXTDMSubmitTransfer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psFenceUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psTransferContextInt,
+ psRGXTDMSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXTDMSetTransferContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ {
+ /* Look up the address from the handle */
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ psTransferContextInt,
+ psRGXTDMNotifyWriteOffsetUpdateIN->ui32PDumpFlags);
+
+
+
+
+RGXTDMNotifyWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
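+/* Passed as the bridge-lock flag for every RGXTQ2 dispatch table entry
+ registered below. */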
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, PVRSRVBridgeRGXTDMCreateTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXTDMDestroyTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER, PVRSRVBridgeRGXTDMSubmitTransfer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXTDMSetTransferContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for rgxtq
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtq
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3)
+
+
+/*******************************************
+ RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sMCUFenceAddr;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXSubmitTransfer
+ *******************************************/
+
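+/* The double-pointer members below are arrays of ui32PrepareCount pointers,
+ each referring to a per-prepare array whose length is given by the matching
+ per-prepare count array. */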
+/* Bridge in structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32PrepareCount;
+ IMG_UINT32 * pui32ClientFenceCount;
+ IMG_HANDLE* * phFenceUFOSyncPrimBlock;
+ IMG_UINT32* * pui32FenceSyncOffset;
+ IMG_UINT32* * pui32FenceValue;
+ IMG_UINT32 * pui32ClientUpdateCount;
+ IMG_HANDLE* * phUpdateUFOSyncPrimBlock;
+ IMG_UINT32* * pui32UpdateSyncOffset;
+ IMG_UINT32* * pui32UpdateValue;
+ IMG_UINT32 * pui32ServerSyncCount;
+ IMG_UINT32* * pui32ServerSyncFlags;
+ IMG_HANDLE* * phServerSync;
+ IMG_INT32 i32CheckFenceFD;
+ IMG_INT32 i32UpdateTimelineFD;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 * pui32CommandSize;
+ IMG_UINT8* * pui8FWCommand;
+ IMG_UINT32 * pui32TQPrepareFlags;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER;
+
+/* Bridge out structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER_TAG
+{
+ IMG_INT32 i32UpdateFenceFD;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER;
+
+
+/*******************************************
+ RGXSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for rgxtq
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtq
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateTransferContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateTransferContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateTransferContextIN->psFrameworkCmd, psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXCreateTransferContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateTransferContext_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateTransferContextOUT->eError =
+ PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateTransferContextIN->ui32Priority,
+ psRGXCreateTransferContextIN->sMCUFenceAddr,
+ psRGXCreateTransferContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psTransferContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateTransferContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
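+ /* The KM destroy function is registered as the handle release callback so
+ the transfer context is torn down when its handle is freed. */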
+ psRGXCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateTransferContextOUT->hTransferContext,
+ (void *) psTransferContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyTransferContextKM);
+ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateTransferContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateTransferContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ if (psTransferContextInt)
+ {
+ PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+ psRGXDestroyTransferContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyTransferContextIN->hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ if ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyTransferContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyTransferContextOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RGXDestroyTransferContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyTransferContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER *psRGXSubmitTransferIN,
+ PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER *psRGXSubmitTransferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXSubmitTransferIN->hTransferContext;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+ IMG_UINT32 *ui32ClientFenceCountInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * **psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE **hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 **ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 **ui32FenceValueInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * **psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 **ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncCountInt = NULL;
+ IMG_UINT32 **ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * **psServerSyncInt = NULL;
+ IMG_HANDLE **hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_UINT32 *ui32CommandSizeInt = NULL;
+ IMG_UINT8 **ui8FWCommandInt = NULL;
+ IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR * *psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BYTE *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
+ IMG_UINT32 ui32BufferSize2 = 0;
+ IMG_UINT32 ui32NextOffset2 = 0;
+
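+ /* pArrayArgsBuffer holds the scalar per-prepare arrays and the first-level
+ pointer tables; the per-prepare data itself lives in pArrayArgsBuffer2,
+ which is sized and allocated once the counts have been copied from user
+ space. */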
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+ }
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXSubmitTransferIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXSubmitTransferIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ClientFenceCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceCountInt, psRGXSubmitTransferIN->pui32ClientFenceCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ /* Assigning hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32FenceSyncOffsetInt to the right offset in the pool buffer for first dimension */
+ ui32FenceSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32FenceValueInt to the right offset in the pool buffer for first dimension */
+ ui32FenceValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ClientUpdateCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateCountInt, psRGXSubmitTransferIN->pui32ClientUpdateCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */
+ ui32UpdateSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */
+ ui32UpdateValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ServerSyncCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncCountInt, psRGXSubmitTransferIN->pui32ServerSyncCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32ServerSyncFlagsInt to the right offset in the pool buffer for first dimension */
+ ui32ServerSyncFlagsInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psServerSyncInt to the right offset in the pool buffer for first dimension */
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+ /* Assigning hServerSyncInt2 to the right offset in the pool buffer for first dimension */
+ hServerSyncInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32CommandSizeInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32CommandSizeInt, psRGXSubmitTransferIN->pui32CommandSize, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+ ui8FWCommandInt = (IMG_UINT8**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32TQPrepareFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32TQPrepareFlagsInt, psRGXSubmitTransferIN->pui32TQPrepareFlags, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXSubmitTransferIN->pui32SyncPMRFlags, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXSubmitTransferIN->phSyncPMRs, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+ {
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+ }
+ }
+
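+ /* pArrayArgsBuffer2 is always heap-allocated; its size depends on the
+ per-prepare counts copied in above. */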
+ if (ui32BufferSize2 != 0)
+ {
+ pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+ if(!pArrayArgsBuffer2)
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+ psFenceUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ /* Assigning each hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hFenceUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32FenceSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32FenceSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32FenceValueInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32FenceValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+ psUpdateUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hUpdateUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32UpdateSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32UpdateValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32ServerSyncFlagsInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32ServerSyncFlagsInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psServerSyncInt to the right offset in the pool buffer (this is the second dimension) */
+ psServerSyncInt[i] = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+ /* Assigning each hServerSyncInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hServerSyncInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+ ui8FWCommandInt[i] = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+ }
+ }
+
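+ /* For each prepare, fetch the user-space pointer for this entry and then
+ copy the array it points to into the second-dimension buffer. */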
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phFenceUFOSyncPrimBlock[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hFenceUFOSyncPrimBlockInt2[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32FenceSyncOffset[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32FenceSyncOffsetInt[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32FenceValue[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32FenceValueInt[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phUpdateUFOSyncPrimBlock[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hUpdateUFOSyncPrimBlockInt2[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32UpdateSyncOffset[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32UpdateSyncOffsetInt[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32UpdateValue[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32UpdateValueInt[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32ServerSyncFlags[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32ServerSyncFlagsInt[i]), psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phServerSync[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hServerSyncInt2[i]), psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT8 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui8FWCommand[i],
+ sizeof(IMG_UINT8 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui8FWCommandInt[i]), psPtr, (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
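+ /* The lookups below take a reference on each underlying object; the
+  * matching unreferences happen in the RGXSubmitTransfer_exit path.
+  */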
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientFenceCountInt[i];j++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i][j],
+ hFenceUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i][j],
+ hUpdateUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ServerSyncCountInt[i];j++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i][j],
+ hServerSyncInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVRGXSubmitTransferKM(
+ psTransferContextInt,
+ psRGXSubmitTransferIN->ui32ClientCacheOpSeqNum,
+ psRGXSubmitTransferIN->ui32PrepareCount,
+ ui32ClientFenceCountInt,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ ui32ClientUpdateCountInt,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ ui32ServerSyncCountInt,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXSubmitTransferIN->i32CheckFenceFD,
+ psRGXSubmitTransferIN->i32UpdateTimelineFD,
+ &psRGXSubmitTransferOUT->i32UpdateFenceFD,
+ uiUpdateFenceNameInt,
+ ui32CommandSizeInt,
+ ui8FWCommandInt,
+ ui32TQPrepareFlagsInt,
+ psRGXSubmitTransferIN->ui32ExtJobRef,
+ psRGXSubmitTransferIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt);
+
+
+
+
+RGXSubmitTransfer_exit:
+
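+ /* Common exit path, taken on success and failure alike: drop every handle
+  * reference taken above, then free the argument pool buffers (the first is
+  * only freed when it was separately allocated rather than carved out of the
+  * bridge input buffer).
+  */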
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientFenceCountInt[i];j++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psFenceUFOSyncPrimBlockInt[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psUpdateUFOSyncPrimBlockInt[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ServerSyncCountInt[i];j++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncInt[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+
+ if(pArrayArgsBuffer2)
+ OSFreeMemNoStats(pArrayArgsBuffer2);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRGXSetTransferContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetTransferContextPriority_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetTransferContextPriorityOUT->eError =
+ PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psTransferContextInt,
+ psRGXSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetTransferContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
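+/* Every entry below is registered with bUseLock (IMG_TRUE), i.e. with
+ * bridge locking enabled for the call.
+ */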
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, PVRSRVBridgeRGXCreateTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXDestroyTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER, PVRSRVBridgeRGXSubmitTransfer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXSetTransferContextPriority,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextASize,
+ const IMG_CHAR *puiTextA,
+ IMG_UINT64 ui64LogicalSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsExportable,
+ IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCPinning(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_BOOL bIsPinned);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCBacking(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_INT32 i32NumModified);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+ IMG_PID ui32Pid);
+
+
+#endif /* CLIENT_RI_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextASize,
+ const IMG_CHAR *puiTextA,
+ IMG_UINT64 ui64LogicalSize)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWritePMREntryKM(
+ psPMRHandleInt,
+ ui32TextASize,
+ puiTextA,
+ ui64LogicalSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsExportable,
+ IMG_HANDLE *phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWriteMEMDESCEntryKM(
+ psPMRHandleInt,
+ ui32TextBSize,
+ puiTextB,
+ ui64Offset,
+ ui64Size,
+ ui64BackedSize,
+ bIsImport,
+ bIsExportable,
+ &psRIHandleInt);
+
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE *phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ RIWriteProcListEntryKM(
+ ui32TextBSize,
+ puiTextB,
+ ui64Size,
+ ui64BackedSize,
+ ui64DevVAddr,
+ &psRIHandleInt);
+
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sAddr)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIUpdateMEMDESCAddrKM(
+ psRIHandleInt,
+ sAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCPinning(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_BOOL bIsPinned)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIUpdateMEMDESCPinningKM(
+ psRIHandleInt,
+ bIsPinned);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCBacking(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_INT32 i32NumModified)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIUpdateMEMDESCBackingKM(
+ psRIHandleInt,
+ i32NumModified);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIDeleteMEMDESCEntryKM(
+ psRIHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIDumpListKM(
+ psPMRHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError = RIDumpAllKM();
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+ IMG_PID ui32Pid)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ RIDumpProcessKM(
+ ui32Pid);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCPINNING PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCBACKING PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+9)
+
+
+/*******************************************
+ RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+ IMG_UINT32 ui32TextASize;
+ const IMG_CHAR * puiTextA;
+ IMG_UINT64 ui64LogicalSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+
+/*******************************************
+ RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+ IMG_UINT32 ui32TextBSize;
+ const IMG_CHAR * puiTextB;
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_UINT64 ui64BackedSize;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsExportable;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+
+/*******************************************
+ RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_UINT32 ui32TextBSize;
+ const IMG_CHAR * puiTextB;
+ IMG_UINT64 ui64Size;
+ IMG_UINT64 ui64BackedSize;
+ IMG_UINT64 ui64DevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+
+/*******************************************
+ RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+ IMG_HANDLE hRIHandle;
+ IMG_DEV_VIRTADDR sAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+
+/*******************************************
+ RIUpdateMEMDESCPinning
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCPinning */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING_TAG
+{
+ IMG_HANDLE hRIHandle;
+ IMG_BOOL bIsPinned;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING;
+
+/* Bridge out structure for RIUpdateMEMDESCPinning */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING;
+
+
+/*******************************************
+ RIUpdateMEMDESCBacking
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCBacking */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING_TAG
+{
+ IMG_HANDLE hRIHandle;
+ IMG_INT32 i32NumModified;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING;
+
+/* Bridge out structure for RIUpdateMEMDESCBacking */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING;
+
+
+/*******************************************
+ RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+
+/*******************************************
+ RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+
+/*******************************************
+ RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+
+/*******************************************
+ RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+ IMG_PID ui32Pid;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+
+#endif /* COMMON_RI_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+ IMG_CHAR *uiTextAInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWritePMREntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWritePMREntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRIWritePMREntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWritePMREntry_exit;
+ }
+ }
+ }
+
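+ /* Variable-length arguments (here just the TextA annotation) are staged in
+  * a pool buffer: either the unused tail of the bridge input buffer or, if
+  * that is too small, a temporary allocation that is freed on exit.
+  */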
+ if (psRIWritePMREntryIN->ui32TextASize != 0)
+ {
+ uiTextAInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextAInt, psRIWritePMREntryIN->puiTextA, psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRIWritePMREntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWritePMREntry_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIWritePMREntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIWritePMREntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWritePMREntry_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIWritePMREntryOUT->eError =
+ RIWritePMREntryKM(
+ psPMRHandleInt,
+ psRIWritePMREntryIN->ui32TextASize,
+ uiTextAInt,
+ psRIWritePMREntryIN->ui64LogicalSize);
+
+
+
+
+RIWritePMREntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteMEMDESCEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextBInt, psRIWriteMEMDESCEntryIN->puiTextB, psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIWriteMEMDESCEntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIWriteMEMDESCEntryOUT->eError =
+ RIWriteMEMDESCEntryKM(
+ psPMRHandleInt,
+ psRIWriteMEMDESCEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteMEMDESCEntryIN->ui64Offset,
+ psRIWriteMEMDESCEntryIN->ui64Size,
+ psRIWriteMEMDESCEntryIN->ui64BackedSize,
+ psRIWriteMEMDESCEntryIN->bIsImport,
+ psRIWriteMEMDESCEntryIN->bIsExportable,
+ &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
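+ /* Wrap the new RI entry in a client handle; RIDeleteMEMDESCEntryKM is
+  * registered as the release callback so the entry is destroyed when the
+  * handle is freed.
+  */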
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRIWriteMEMDESCEntryOUT->hRIHandle,
+ (void *) psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RIWriteMEMDESCEntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteProcListEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteProcListEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextBInt, psRIWriteProcListEntryIN->puiTextB, psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteProcListEntry_exit;
+ }
+ }
+
+
+ psRIWriteProcListEntryOUT->eError =
+ RIWriteProcListEntryKM(
+ psRIWriteProcListEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteProcListEntryIN->ui64Size,
+ psRIWriteProcListEntryIN->ui64BackedSize,
+ psRIWriteProcListEntryIN->ui64DevVAddr,
+ &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRIWriteProcListEntryOUT->hRIHandle,
+ (void *) psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RIWriteProcListEntry_exit:
+
+
+
+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN,
+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+ RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIUpdateMEMDESCAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRIHandleInt,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ IMG_TRUE);
+ if(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIUpdateMEMDESCAddr_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIUpdateMEMDESCAddrOUT->eError =
+ RIUpdateMEMDESCAddrKM(
+ psRIHandleInt,
+ psRIUpdateMEMDESCAddrIN->sAddr);
+
+
+
+
+RIUpdateMEMDESCAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRIHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCPinning(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING *psRIUpdateMEMDESCPinningIN,
+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING *psRIUpdateMEMDESCPinningOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCPinningIN->hRIHandle;
+ RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIUpdateMEMDESCPinningOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRIHandleInt,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ IMG_TRUE);
+ if(psRIUpdateMEMDESCPinningOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIUpdateMEMDESCPinning_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIUpdateMEMDESCPinningOUT->eError =
+ RIUpdateMEMDESCPinningKM(
+ psRIHandleInt,
+ psRIUpdateMEMDESCPinningIN->bIsPinned);
+
+
+
+
+RIUpdateMEMDESCPinning_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRIHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCBacking(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING *psRIUpdateMEMDESCBackingIN,
+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING *psRIUpdateMEMDESCBackingOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCBackingIN->hRIHandle;
+ RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIUpdateMEMDESCBackingOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRIHandleInt,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ IMG_TRUE);
+ if(psRIUpdateMEMDESCBackingOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIUpdateMEMDESCBacking_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIUpdateMEMDESCBackingOUT->eError =
+ RIUpdateMEMDESCBackingKM(
+ psRIHandleInt,
+ psRIUpdateMEMDESCBackingIN->i32NumModified);
+
+
+
+
+RIUpdateMEMDESCBacking_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psRIHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN,
+ PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
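+ /* Destroying the client handle is what tears down the RI entry: the
+  * release callback registered at creation (RIDeleteMEMDESCEntryKM) is
+  * invoked by the handle framework rather than being called here directly.
+  */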
+ psRIDeleteMEMDESCEntryOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ if ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) &&
+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRIDeleteMEMDESCEntry: %s",
+ PVRSRVGetErrorStringKM(psRIDeleteMEMDESCEntryOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto RIDeleteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RIDeleteMEMDESCEntry_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psRIDumpListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIDumpListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIDumpList_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIDumpListOUT->eError =
+ RIDumpListKM(
+ psPMRHandleInt);
+
+
+
+
+RIDumpList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+
+
+
+
+ psRIDumpAllOUT->eError = RIDumpAllKM();
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+ psRIDumpProcessOUT->eError =
+ RIDumpProcessKM(
+ psRIDumpProcessIN->ui32Pid);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, PVRSRVBridgeRIWritePMREntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, PVRSRVBridgeRIWriteMEMDESCEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, PVRSRVBridgeRIWriteProcListEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, PVRSRVBridgeRIUpdateMEMDESCAddr,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCPINNING, PVRSRVBridgeRIUpdateMEMDESCPinning,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCBACKING, PVRSRVBridgeRIUpdateMEMDESCBacking,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, PVRSRVBridgeRIDeleteMEMDESCEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, PVRSRVBridgeRIDumpProcess,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RI functions from services
+ */
+PVRSRV_ERROR DeinitRIBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_INITSRVDISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13)
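+
+/* The offsets above index the SRVCORE dispatch table entries registered in the
+ * server bridge; the structures below are packed so that the client and server
+ * sides of the bridge agree on their layout.
+ */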
+
+
+/*******************************************
+ Connect
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32ClientBuildOptions;
+ IMG_UINT32 ui32ClientDDKVersion;
+ IMG_UINT32 ui32ClientDDKBuild;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CONNECT;
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+ IMG_UINT8 ui8KernelArch;
+ IMG_UINT32 ui32CapabilityFlags;
+ IMG_UINT32 ui32PVRBridges;
+ IMG_UINT32 ui32RGXBridges;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CONNECT;
+
+
+/*******************************************
+ Disconnect
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+
+/*******************************************
+ InitSrvDisconnect
+ *******************************************/
+
+/* Bridge in structure for InitSrvDisconnect */
+typedef struct PVRSRV_BRIDGE_IN_INITSRVDISCONNECT_TAG
+{
+ IMG_BOOL bInitSuccesful;
+ IMG_UINT32 ui32ClientBuildOptions;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_INITSRVDISCONNECT;
+
+/* Bridge out structure for InitSrvDisconnect */
+typedef struct PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT;
+
+
+/*******************************************
+ AcquireGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+
+/*******************************************
+ ReleaseGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+
+/*******************************************
+ EventObjectOpen
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+
+/*******************************************
+ EventObjectWait
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+
+/*******************************************
+ EventObjectClose
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+
+/*******************************************
+ DumpDebugInfo
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+ IMG_UINT32 ui32ui32VerbLevel;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+
+/*******************************************
+ GetDevClockSpeed
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+ IMG_UINT32 ui32ui32ClockSpeed;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+
+/*******************************************
+ HWOpTimeout
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+
+/*******************************************
+ AlignmentCheck
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+ IMG_UINT32 ui32AlignChecksSize;
+ IMG_UINT32 * pui32AlignChecks;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+
+/*******************************************
+ GetDeviceStatus
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+ IMG_UINT32 ui32DeviceSatus;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+
+/*******************************************
+ EventObjectWaitTimeout
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ IMG_HANDLE hOSEventKM;
+ IMG_UINT64 ui64uiTimeoutus;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
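+/* Each handler below follows the same pattern: unpack the IN structure, look
+ * up or create any handles under the handle lock, call the underlying KM
+ * function, and store the result in the OUT structure's eError field. The
+ * handlers themselves always return 0; errors are reported through eError.
+ */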
+
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CONNECT *psConnectIN,
+ PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psConnectOUT->eError =
+ PVRSRVConnectKM(psConnection, OSGetDevData(psConnection),
+ psConnectIN->ui32Flags,
+ psConnectIN->ui32ClientBuildOptions,
+ psConnectIN->ui32ClientDDKVersion,
+ psConnectIN->ui32ClientDDKBuild,
+ &psConnectOUT->ui8KernelArch,
+ &psConnectOUT->ui32CapabilityFlags,
+ &psConnectOUT->ui32PVRBridges,
+ &psConnectOUT->ui32RGXBridges);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN,
+ PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+
+
+
+
+ psDisconnectOUT->eError =
+ PVRSRVDisconnectKM(
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeInitSrvDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_INITSRVDISCONNECT *psInitSrvDisconnectIN,
+ PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT *psInitSrvDisconnectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psInitSrvDisconnectOUT->eError =
+ PVRSRVInitSrvDisconnectKM(psConnection, OSGetDevData(psConnection),
+ psInitSrvDisconnectIN->bInitSuccesful,
+ psInitSrvDisconnectIN->ui32ClientBuildOptions);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN,
+ PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+
+
+
+
+ psAcquireGlobalEventObjectOUT->eError =
+ PVRSRVAcquireGlobalEventObjectKM(
+ &hGlobalEventObjectInt);
+ /* Exit early if bridged call fails */
+ if(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psAcquireGlobalEventObjectOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+ (void *) hGlobalEventObjectInt,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVReleaseGlobalEventObjectKM);
+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+AcquireGlobalEventObject_exit:
+
+
+
+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ if (hGlobalEventObjectInt)
+ {
+ PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN,
+ PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psReleaseGlobalEventObjectOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psReleaseGlobalEventObjectIN->hGlobalEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ if ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) &&
+ (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeReleaseGlobalEventObject: %s",
+ PVRSRVGetErrorStringKM(psReleaseGlobalEventObjectOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto ReleaseGlobalEventObject_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+ReleaseGlobalEventObject_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+ IMG_HANDLE hEventObjectInt = NULL;
+ IMG_HANDLE hOSEventInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psEventObjectOpenOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hEventObjectInt,
+ hEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ IMG_TRUE);
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectOpen_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectOpenOUT->eError =
+ OSEventObjectOpen(
+ hEventObjectInt,
+ &hOSEventInt);
+ /* Exit early if bridged call fails */
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ goto EventObjectOpen_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ (void *) hOSEventInt,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&OSEventObjectClose);
+ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectOpen_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+EventObjectOpen_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hEventObjectInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ if (hOSEventInt)
+ {
+ OSEventObjectClose(hOSEventInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psEventObjectWaitOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ IMG_TRUE);
+ if(psEventObjectWaitOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectWait_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectWaitOUT->eError =
+ OSEventObjectWait(
+ hOSEventKMInt);
+
+
+
+
+EventObjectWait_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psEventObjectCloseOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ if ((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeEventObjectClose: %s",
+ PVRSRVGetErrorStringKM(psEventObjectCloseOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto EventObjectClose_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+EventObjectClose_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN,
+ PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psDumpDebugInfoOUT->eError =
+ PVRSRVDumpDebugInfoKM(psConnection, OSGetDevData(psConnection),
+ psDumpDebugInfoIN->ui32ui32VerbLevel);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN,
+ PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);
+
+
+
+
+
+ psGetDevClockSpeedOUT->eError =
+ PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevData(psConnection),
+ &psGetDevClockSpeedOUT->ui32ui32ClockSpeed);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN,
+ PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+
+
+
+
+ psHWOpTimeoutOUT->eError =
+ PVRSRVHWOpTimeoutKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN,
+ PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
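+ /* psAlignmentCheckIN sits at the start of the fixed-size bridge input buffer,
+ so any word-aligned space left after it (up to PVRSRV_MAX_BRIDGE_IN_SIZE)
+ can hold the copied-in array and the OSAllocMemNoStats() fallback below
+ can be skipped. */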
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psAlignmentCheckIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto AlignmentCheck_exit;
+ }
+ }
+ }
+
+ if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+ {
+ ui32AlignChecksInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AlignChecksInt, psAlignmentCheckIN->pui32AlignChecks, psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto AlignmentCheck_exit;
+ }
+ }
+
+
+ psAlignmentCheckOUT->eError =
+ PVRSRVAlignmentCheckKM(psConnection, OSGetDevData(psConnection),
+ psAlignmentCheckIN->ui32AlignChecksSize,
+ ui32AlignChecksInt);
+
+
+
+
+AlignmentCheck_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeAlignmentCheck NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN,
+ PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);
+
+
+
+
+
+ psGetDeviceStatusOUT->eError =
+ PVRSRVGetDeviceStatusKM(psConnection, OSGetDevData(psConnection),
+ &psGetDeviceStatusOUT->ui32DeviceSatus);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psEventObjectWaitTimeoutOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ IMG_TRUE);
+ if(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectWaitTimeout_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectWaitTimeoutOUT->eError =
+ OSEventObjectWaitTimeout(
+ hOSEventKMInt,
+ psEventObjectWaitTimeoutIN->ui64uiTimeoutus);
+
+
+
+
+EventObjectWaitTimeout_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
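+/* bUseLock is passed to SetDispatchTableEntry() for every SRVCORE entry point
+ * below, so all SRVCORE bridge calls are registered with bridge locking enabled.
+ */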
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, PVRSRVBridgeConnect,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, PVRSRVBridgeDisconnect,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_INITSRVDISCONNECT, PVRSRVBridgeInitSrvDisconnect,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, PVRSRVBridgeAcquireGlobalEventObject,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, PVRSRVBridgeReleaseGlobalEventObject,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, PVRSRVBridgeEventObjectOpen,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, PVRSRVBridgeEventObjectWait,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, PVRSRVBridgeEventObjectClose,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, PVRSRVBridgeDumpDebugInfo,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, PVRSRVBridgeGetDevClockSpeed,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, PVRSRVBridgeHWOpTimeout,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, PVRSRVBridgeAlignmentCheck,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, PVRSRVBridgeGetDeviceStatus,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, PVRSRVBridgeEventObjectWaitTimeout,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SRVCORE functions from services
+ */
+PVRSRV_ERROR DeinitSRVCOREBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 *pui32SyncPrimBlockSize,
+ IMG_HANDLE *phhSyncPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_BOOL bbUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncCount,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncBlockCount,
+ IMG_HANDLE *phBlockList,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32SyncBlockIndex,
+ IMG_UINT32 *pui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_HANDLE *phServerSync,
+ IMG_HANDLE *phServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *pui32ServerFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_BOOL *pbReady);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32FWAddr);
+
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+
+
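+/* In this direct client bridge the IMG_HANDLEs passed in are the kernel-side
+ * object pointers themselves: each wrapper casts the handle to its internal
+ * type and calls the corresponding KM function directly. Where a device is
+ * needed (e.g. BridgeAllocSyncPrimitiveBlock) hBridge itself is cast to the
+ * PVRSRV_DEVICE_NODE; otherwise hBridge is unused.
+ */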
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 *pui32SyncPrimBlockSize,
+ IMG_HANDLE *phhSyncPMR)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PMR * pshSyncPMRInt;
+
+
+ eError =
+ PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ &psSyncHandleInt,
+ pui32SyncPrimVAddr,
+ pui32SyncPrimBlockSize,
+ &pshSyncPMRInt);
+
+ *phSyncHandle = psSyncHandleInt;
+ *phhSyncPMR = pshSyncPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVFreeSyncPrimitiveBlockKM(
+ psSyncHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimSetKM(
+ psSyncHandleInt,
+ ui32Index,
+ ui32Value);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncPrimSetKM(
+ psSyncHandleInt,
+ ui32Value);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+
+
+ eError =
+ PVRSRVServerSyncAllocKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge)
+ ,
+ &psSyncHandleInt,
+ pui32SyncPrimVAddr,
+ ui32ClassNameSize,
+ puiClassName);
+
+ *phSyncHandle = psSyncHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncFreeKM(
+ psSyncHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_BOOL bbUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncQueueHWOpKM(
+ psSyncHandleInt,
+ bbUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncCount,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * *psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **) phSyncHandle;
+
+ eError =
+ PVRSRVServerSyncGetStatusKM(
+ ui32SyncCount,
+ psSyncHandleInt,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncBlockCount,
+ IMG_HANDLE *phBlockList,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32SyncBlockIndex,
+ IMG_UINT32 *pui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_HANDLE *phServerSync,
+ IMG_HANDLE *phServerCookie)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * *psBlockListInt;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psBlockListInt = (SYNC_PRIMITIVE_BLOCK **) phBlockList;
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **) phServerSync;
+
+ eError =
+ PVRSRVSyncPrimOpCreateKM(
+ ui32SyncBlockCount,
+ psBlockListInt,
+ ui32ClientSyncCount,
+ pui32SyncBlockIndex,
+ pui32Index,
+ ui32ServerSyncCount,
+ psServerSyncInt,
+ &psServerCookieInt);
+
+ *phServerCookie = psServerCookieInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *pui32ServerFlags)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpTakeKM(
+ psServerCookieInt,
+ ui32ClientSyncCount,
+ pui32Flags,
+ pui32FenceValue,
+ pui32UpdateValue,
+ ui32ServerSyncCount,
+ pui32ServerFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_BOOL *pbReady)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpReadyKM(
+ psServerCookieInt,
+ pbReady);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpCompleteKM(
+ psServerCookieInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpDestroyKM(
+ psServerCookieInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpKM(
+ psSyncHandleInt,
+ ui32Offset);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpValueKM(
+ psSyncHandleInt,
+ ui32Offset,
+ ui32Value);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpPolKM(
+ psSyncHandleInt,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ uiPDumpFlags);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpPDumpPolKM(
+ psServerCookieInt,
+ eOperator,
+ uiPDumpFlags);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hServerCookie);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpCBPKM(
+ psSyncHandleInt,
+ ui32Offset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PVRSRVSyncAllocEventKM(
+ bServerSync,
+ ui32FWAddr,
+ ui32ClassNameSize,
+ puiClassName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32FWAddr)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PVRSRVSyncFreeEventKM(
+ ui32FWAddr);
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY PVRSRV_BRIDGE_SYNC_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE PVRSRV_BRIDGE_SYNC_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY PVRSRV_BRIDGE_SYNC_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+17
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+18
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+19
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
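+
+/* The offsets above index the SYNC dispatch table entries registered in the
+ * server bridge; the structures below are packed so that the client and server
+ * sides of the bridge agree on their layout.
+ */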
+
+
+/*******************************************
+ AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32SyncPrimVAddr;
+ IMG_UINT32 ui32SyncPrimBlockSize;
+ IMG_HANDLE hhSyncPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+
+/*******************************************
+ FreeSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+
+/*******************************************
+ SyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+
+/*******************************************
+ ServerSyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET;
+
+/* Bridge out structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET;
+
+
+/*******************************************
+ ServerSyncAlloc
+ *******************************************/
+
+/* Bridge in structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCALLOC_TAG
+{
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCALLOC;
+
+/* Bridge out structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32SyncPrimVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC;
+
+
+/*******************************************
+ ServerSyncFree
+ *******************************************/
+
+/* Bridge in structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCFREE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCFREE;
+
+/* Bridge out structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCFREE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCFREE;
+
+
+/*******************************************
+ ServerSyncQueueHWOp
+ *******************************************/
+
+/* Bridge in structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_BOOL bbUpdate;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP;
+
+/* Bridge out structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP_TAG
+{
+ IMG_UINT32 ui32FenceValue;
+ IMG_UINT32 ui32UpdateValue;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP;
+
+
+/*******************************************
+ ServerSyncGetStatus
+ *******************************************/
+
+/* Bridge in structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS_TAG
+{
+ IMG_UINT32 ui32SyncCount;
+ IMG_HANDLE * phSyncHandle;
+ /* Output pointer pui32UID is also an implied input */
+ IMG_UINT32 * pui32UID;
+ /* Output pointer pui32FWAddr is also an implied input */
+ IMG_UINT32 * pui32FWAddr;
+ /* Output pointer pui32CurrentOp is also an implied input */
+ IMG_UINT32 * pui32CurrentOp;
+ /* Output pointer pui32NextOp is also an implied input */
+ IMG_UINT32 * pui32NextOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS;
+
+/* Bridge out structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS_TAG
+{
+ IMG_UINT32 * pui32UID;
+ IMG_UINT32 * pui32FWAddr;
+ IMG_UINT32 * pui32CurrentOp;
+ IMG_UINT32 * pui32NextOp;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS;
+
+
+/*******************************************
+ SyncPrimOpCreate
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE_TAG
+{
+ IMG_UINT32 ui32SyncBlockCount;
+ IMG_HANDLE * phBlockList;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 * pui32SyncBlockIndex;
+ IMG_UINT32 * pui32Index;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_HANDLE * phServerSync;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE;
+
+/* Bridge out structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE_TAG
+{
+ IMG_HANDLE hServerCookie;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE;
+
+
+/*******************************************
+ SyncPrimOpTake
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE_TAG
+{
+ IMG_HANDLE hServerCookie;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 * pui32Flags;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE;
+
+/* Bridge out structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE;
+
+
+/*******************************************
+ SyncPrimOpReady
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY;
+
+/* Bridge out structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY_TAG
+{
+ IMG_BOOL bReady;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY;
+
+
+/*******************************************
+ SyncPrimOpComplete
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE;
+
+/* Bridge out structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE;
+
+
+/*******************************************
+ SyncPrimOpDestroy
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY;
+
+/* Bridge out structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY;
+
+
+/*******************************************
+ SyncPrimPDump
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+
+/*******************************************
+ SyncPrimPDumpValue
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+
+/*******************************************
+ SyncPrimPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ PDUMP_POLL_OPERATOR eOperator;
+ PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+
+/*******************************************
+ SyncPrimOpPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL_TAG
+{
+ IMG_HANDLE hServerCookie;
+ PDUMP_POLL_OPERATOR eOperator;
+ PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL;
+
+/* Bridge out structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL;
+
+
+/*******************************************
+ SyncPrimPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+
+/*******************************************
+ SyncAllocEvent
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+ IMG_BOOL bServerSync;
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+
+/*******************************************
+ SyncFreeEvent
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+ IMG_UINT32 ui32FWAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+
+#endif /* COMMON_SYNC_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
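+/*
+ * Each entry point below follows the same pattern: copy any array arguments
+ * out of user space, look up kernel objects from the supplied handles under
+ * the handle lock, call the corresponding *KM implementation, copy results
+ * back to user space and finally drop the handle references taken during
+ * lookup. Errors are reported through the OUT structure's eError field; the
+ * bridge function itself always returns 0.
+ */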
+
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN,
+ PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+ PMR * pshSyncPMRInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+
+ psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError =
+ PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevData(psConnection),
+ &psSyncHandleInt,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+ &pshSyncPMRInt);
+ /* Exit early if bridged call fails */
+ if(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+ (void *) psSyncHandleInt,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVFreeSyncPrimitiveBlockKM);
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+
+
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
+ (void *) pshSyncPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+AllocSyncPrimitiveBlock_exit:
+
+
+
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeAllocSyncPrimitiveBlock: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSyncHandleInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psSyncHandleInt)
+ {
+ PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN,
+ PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psFreeSyncPrimitiveBlockOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ if ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) &&
+ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeFreeSyncPrimitiveBlock: %s",
+ PVRSRVGetErrorStringKM(psFreeSyncPrimitiveBlockOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto FreeSyncPrimitiveBlock_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+FreeSyncPrimitiveBlock_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimSetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimSet_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimSetOUT->eError =
+ PVRSRVSyncPrimSetKM(
+ psSyncHandleInt,
+ psSyncPrimSetIN->ui32Index,
+ psSyncPrimSetIN->ui32Value);
+
+
+
+
+SyncPrimSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET *psServerSyncPrimSetIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET *psServerSyncPrimSetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psServerSyncPrimSetIN->hSyncHandle;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psServerSyncPrimSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncPrimSetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncPrimSet_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncPrimSetOUT->eError =
+ PVRSRVServerSyncPrimSetKM(
+ psSyncHandleInt,
+ psServerSyncPrimSetIN->ui32Value);
+
+
+
+
+ServerSyncPrimSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncAlloc(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCALLOC *psServerSyncAllocIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC *psServerSyncAllocOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncAllocIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncAllocIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psServerSyncAllocOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ServerSyncAlloc_exit;
+ }
+ }
+ }
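+ /* At this point pArrayArgsBuffer either aliases the spare space at the end
+ * of the bridge input buffer or is a fresh NoStats allocation; in the
+ * aliased case the matching OSFreeMemNoStats at the end of the function is
+ * skipped. */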
+
+ if (psServerSyncAllocIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, psServerSyncAllocIN->puiClassName, psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psServerSyncAllocOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncAlloc_exit;
+ }
+ }
+
+
+ psServerSyncAllocOUT->eError =
+ PVRSRVServerSyncAllocKM(psConnection, OSGetDevData(psConnection),
+ &psSyncHandleInt,
+ &psServerSyncAllocOUT->ui32SyncPrimVAddr,
+ psServerSyncAllocIN->ui32ClassNameSize,
+ uiClassNameInt);
+ /* Exit early if bridged call fails */
+ if(psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ goto ServerSyncAlloc_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psServerSyncAllocOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psServerSyncAllocOUT->hSyncHandle,
+ (void *) psSyncHandleInt,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVServerSyncFreeKM);
+ if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncAlloc_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+ServerSyncAlloc_exit:
+
+
+
+ if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ if (psSyncHandleInt)
+ {
+ PVRSRVServerSyncFreeKM(psSyncHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncFree(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCFREE *psServerSyncFreeIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCFREE *psServerSyncFreeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psServerSyncFreeOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psServerSyncFreeIN->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ if ((psServerSyncFreeOUT->eError != PVRSRV_OK) &&
+ (psServerSyncFreeOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeServerSyncFree: %s",
+ PVRSRVGetErrorStringKM(psServerSyncFreeOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto ServerSyncFree_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+ServerSyncFree_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncQueueHWOp(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psServerSyncQueueHWOpIN->hSyncHandle;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psServerSyncQueueHWOpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncQueueHWOpOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncQueueHWOp_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncQueueHWOpOUT->eError =
+ PVRSRVServerSyncQueueHWOpKM(
+ psSyncHandleInt,
+ psServerSyncQueueHWOpIN->bbUpdate,
+ &psServerSyncQueueHWOpOUT->ui32FenceValue,
+ &psServerSyncQueueHWOpOUT->ui32UpdateValue);
+
+
+
+
+ServerSyncQueueHWOp_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncGetStatus(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS *psServerSyncGetStatusIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS *psServerSyncGetStatusOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SERVER_SYNC_PRIMITIVE * *psSyncHandleInt = NULL;
+ IMG_HANDLE *hSyncHandleInt2 = NULL;
+ IMG_UINT32 *pui32UIDInt = NULL;
+ IMG_UINT32 *pui32FWAddrInt = NULL;
+ IMG_UINT32 *pui32CurrentOpInt = NULL;
+ IMG_UINT32 *pui32NextOpInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ 0;
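+ /* A single scratch buffer holds the looked-up sync pointers, the copied-in
+ * handle array and the four per-sync output arrays, packed back to back at
+ * increasing ui32NextOffset offsets. */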
+
+
+
+ psServerSyncGetStatusOUT->pui32UID = psServerSyncGetStatusIN->pui32UID;
+ psServerSyncGetStatusOUT->pui32FWAddr = psServerSyncGetStatusIN->pui32FWAddr;
+ psServerSyncGetStatusOUT->pui32CurrentOp = psServerSyncGetStatusIN->pui32CurrentOp;
+ psServerSyncGetStatusOUT->pui32NextOp = psServerSyncGetStatusIN->pui32NextOp;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncGetStatusIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncGetStatusIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hSyncHandleInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncHandleInt2, psServerSyncGetStatusIN->phSyncHandle, psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32UIDInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32FWAddrInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32CurrentOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32NextOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psServerSyncGetStatusIN->ui32SyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psServerSyncGetStatusOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt[i],
+ hSyncHandleInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncGetStatusOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncGetStatusOUT->eError =
+ PVRSRVServerSyncGetStatusKM(
+ psServerSyncGetStatusIN->ui32SyncCount,
+ psSyncHandleInt,
+ pui32UIDInt,
+ pui32FWAddrInt,
+ pui32CurrentOpInt,
+ pui32NextOpInt);
+
+
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32UID, pui32UIDInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32FWAddr, pui32FWAddrInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32CurrentOp, pui32CurrentOpInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32NextOp, pui32NextOpInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+
+ServerSyncGetStatus_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psServerSyncGetStatusIN->ui32SyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ /* psSyncHandleInt may still be NULL if the scratch buffer allocation failed */
+ if(psSyncHandleInt && psSyncHandleInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandleInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE *psSyncPrimOpCreateIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE *psSyncPrimOpCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_PRIMITIVE_BLOCK * *psBlockListInt = NULL;
+ IMG_HANDLE *hBlockListInt2 = NULL;
+ IMG_UINT32 *ui32SyncBlockIndexInt = NULL;
+ IMG_UINT32 *ui32IndexInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) +
+ (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpCreateIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpCreateIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+
+ if (psSyncPrimOpCreateIN->ui32SyncBlockCount != 0)
+ {
+ psBlockListInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hBlockListInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hBlockListInt2, psSyncPrimOpCreateIN->phBlockList, psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+ {
+ ui32SyncBlockIndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncBlockIndexInt, psSyncPrimOpCreateIN->pui32SyncBlockIndex, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+ {
+ ui32IndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32IndexInt, psSyncPrimOpCreateIN->pui32Index, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, psSyncPrimOpCreateIN->phServerSync, psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32SyncBlockCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psBlockListInt[i],
+ hBlockListInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVSyncPrimOpCreateKM(
+ psSyncPrimOpCreateIN->ui32SyncBlockCount,
+ psBlockListInt,
+ psSyncPrimOpCreateIN->ui32ClientSyncCount,
+ ui32SyncBlockIndexInt,
+ ui32IndexInt,
+ psSyncPrimOpCreateIN->ui32ServerSyncCount,
+ psServerSyncInt,
+ &psServerCookieInt);
+ /* Exit early if bridged call fails */
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ goto SyncPrimOpCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psSyncPrimOpCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psSyncPrimOpCreateOUT->hServerCookie,
+ (void *) psServerCookieInt,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVSyncPrimOpDestroyKM);
+ if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+SyncPrimOpCreate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32SyncBlockCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ /* psBlockListInt may still be NULL if the scratch buffer allocation failed */
+ if(psBlockListInt && psBlockListInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hBlockListInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32ServerSyncCount;i++)
+ {
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerSyncInt && psServerSyncInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ if (psServerCookieInt)
+ {
+ PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpTake(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE *psSyncPrimOpTakeIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE *psSyncPrimOpTakeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpTakeIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+ IMG_UINT32 *ui32FlagsInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerFlagsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpTakeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpTakeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ }
+
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32FlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FlagsInt, psSyncPrimOpTakeIN->pui32Flags, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, psSyncPrimOpTakeIN->pui32FenceValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, psSyncPrimOpTakeIN->pui32UpdateValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerFlagsInt, psSyncPrimOpTakeIN->pui32ServerFlags, psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpTakeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpTakeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpTakeOUT->eError =
+ PVRSRVSyncPrimOpTakeKM(
+ psServerCookieInt,
+ psSyncPrimOpTakeIN->ui32ClientSyncCount,
+ ui32FlagsInt,
+ ui32FenceValueInt,
+ ui32UpdateValueInt,
+ psSyncPrimOpTakeIN->ui32ServerSyncCount,
+ ui32ServerFlagsInt);
+
+
+
+
+SyncPrimOpTake_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpReady(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY *psSyncPrimOpReadyIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY *psSyncPrimOpReadyOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpReadyIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpReadyOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpReadyOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpReady_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpReadyOUT->eError =
+ PVRSRVSyncPrimOpReadyKM(
+ psServerCookieInt,
+ &psSyncPrimOpReadyOUT->bReady);
+
+
+
+
+SyncPrimOpReady_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpComplete(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpCompleteIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpCompleteOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpCompleteOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpComplete_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpCompleteOUT->eError =
+ PVRSRVSyncPrimOpCompleteKM(
+ psServerCookieInt);
+
+
+
+
+SyncPrimOpComplete_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psSyncPrimOpDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psSyncPrimOpDestroyIN->hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ if ((psSyncPrimOpDestroyOUT->eError != PVRSRV_OK) &&
+ (psSyncPrimOpDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeSyncPrimOpDestroy: %s",
+ PVRSRVGetErrorStringKM(psSyncPrimOpDestroyOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto SyncPrimOpDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+SyncPrimOpDestroy_exit:
+
+
+
+
+ return 0;
+}
+
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDump_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVSyncPrimPDumpKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpIN->ui32Offset);
+
+
+
+
+SyncPrimPDump_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpValue_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVSyncPrimPDumpValueKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpValueIN->ui32Offset,
+ psSyncPrimPDumpValueIN->ui32Value);
+
+
+
+
+SyncPrimPDumpValue_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpPol_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVSyncPrimPDumpPolKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpPolIN->ui32Offset,
+ psSyncPrimPDumpPolIN->ui32Value,
+ psSyncPrimPDumpPolIN->ui32Mask,
+ psSyncPrimPDumpPolIN->eOperator,
+ psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimPDumpPol_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpPDumpPolIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpPDumpPolOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpPDumpPolOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpPDumpPol_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpPDumpPolOUT->eError =
+ PVRSRVSyncPrimOpPDumpPolKM(
+ psServerCookieInt,
+ psSyncPrimOpPDumpPolIN->eOperator,
+ psSyncPrimOpPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimOpPDumpPol_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpCBP_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVSyncPrimPDumpCBPKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpCBPIN->ui32Offset,
+ psSyncPrimPDumpCBPIN->uiWriteOffset,
+ psSyncPrimPDumpCBPIN->uiPacketSize,
+ psSyncPrimPDumpCBPIN->uiBufferSize);
+
+
+
+
+SyncPrimPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN,
+ PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncAllocEventIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncAllocEvent_exit;
+ }
+ }
+ }
+
+ if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, psSyncAllocEventIN->puiClassName, psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncAllocEvent_exit;
+ }
+ }
+
+
+ psSyncAllocEventOUT->eError =
+ PVRSRVSyncAllocEventKM(
+ psSyncAllocEventIN->bServerSync,
+ psSyncAllocEventIN->ui32FWAddr,
+ psSyncAllocEventIN->ui32ClassNameSize,
+ uiClassNameInt);
+
+
+
+
+SyncAllocEvent_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN,
+ PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+ psSyncFreeEventOUT->eError =
+ PVRSRVSyncFreeEventKM(
+ psSyncFreeEventIN->ui32FWAddr);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, PVRSRVBridgeAllocSyncPrimitiveBlock,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, PVRSRVBridgeFreeSyncPrimitiveBlock,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, PVRSRVBridgeSyncPrimSet,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET, PVRSRVBridgeServerSyncPrimSet,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC, PVRSRVBridgeServerSyncAlloc,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE, PVRSRVBridgeServerSyncFree,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP, PVRSRVBridgeServerSyncQueueHWOp,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS, PVRSRVBridgeServerSyncGetStatus,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE, PVRSRVBridgeSyncPrimOpCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE, PVRSRVBridgeSyncPrimOpTake,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY, PVRSRVBridgeSyncPrimOpReady,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE, PVRSRVBridgeSyncPrimOpComplete,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY, PVRSRVBridgeSyncPrimOpDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, PVRSRVBridgeSyncPrimPDump,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, PVRSRVBridgeSyncPrimPDumpValue,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, PVRSRVBridgeSyncPrimPDumpPol,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL, PVRSRVBridgeSyncPrimOpPDumpPol,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, PVRSRVBridgeSyncPrimPDumpCBP,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, PVRSRVBridgeSyncAllocEvent,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, PVRSRVBridgeSyncFreeEvent,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNC functions from services
+ */
+PVRSRV_ERROR DeinitSYNCBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Client bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+ IMG_HANDLE *phhRecord,
+ IMG_HANDLE hhServerSyncPrimBlock,
+ IMG_UINT32 ui32ui32FwBlockAddr,
+ IMG_UINT32 ui32ui32SyncOffset,
+ IMG_BOOL bbServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Direct client bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hhRecord)
+{
+ PVRSRV_ERROR eError;
+ SYNC_RECORD_HANDLE pshRecordInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+ eError =
+ PVRSRVSyncRecordRemoveByHandleKM(
+ pshRecordInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+ IMG_HANDLE *phhRecord,
+ IMG_HANDLE hhServerSyncPrimBlock,
+ IMG_UINT32 ui32ui32FwBlockAddr,
+ IMG_UINT32 ui32ui32SyncOffset,
+ IMG_BOOL bbServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+ SYNC_RECORD_HANDLE pshRecordInt;
+ SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt;
+
+ pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+ eError =
+ PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ &pshRecordInt,
+ pshServerSyncPrimBlockInt,
+ ui32ui32FwBlockAddr,
+ ui32ui32SyncOffset,
+ bbServerSync,
+ ui32ClassNameSize,
+ puiClassName);
+
+ *phhRecord = pshRecordInt;
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+
+/*******************************************
+ SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ IMG_HANDLE hhRecord;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+
+/*******************************************
+ SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhServerSyncPrimBlock;
+ IMG_UINT32 ui32ui32FwBlockAddr;
+ IMG_UINT32 ui32ui32SyncOffset;
+ IMG_BOOL bbServerSync;
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhRecord;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN,
+ PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psSyncRecordRemoveByHandleOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+ if ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) &&
+ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeSyncRecordRemoveByHandle: %s",
+ PVRSRVGetErrorStringKM(psSyncRecordRemoveByHandleOUT->eError)));
+ PVR_ASSERT(0);
+ UnlockHandle();
+ goto SyncRecordRemoveByHandle_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+SyncRecordRemoveByHandle_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN,
+ PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_RECORD_HANDLE pshRecordInt = NULL;
+ IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock;
+ SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt = NULL;
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncRecordAddIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncRecordAdd_exit;
+ }
+ }
+ }
+
+ if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, psSyncRecordAddIN->puiClassName, psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncRecordAdd_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ /* Look up the address from the handle */
+ psSyncRecordAddOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pshServerSyncPrimBlockInt,
+ hhServerSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncRecordAdd_exit;
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncRecordAddOUT->eError =
+ PVRSRVSyncRecordAddKM(psConnection, OSGetDevData(psConnection),
+ &pshRecordInt,
+ pshServerSyncPrimBlockInt,
+ psSyncRecordAddIN->ui32ui32FwBlockAddr,
+ psSyncRecordAddIN->ui32ui32SyncOffset,
+ psSyncRecordAddIN->bbServerSync,
+ psSyncRecordAddIN->ui32ClassNameSize,
+ uiClassNameInt);
+ /* Exit early if bridged call fails */
+ if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ goto SyncRecordAdd_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psSyncRecordAddOUT->hhRecord,
+ (void *) pshRecordInt,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ (PFN_HANDLE_RELEASE)&PVRSRVSyncRecordRemoveByHandleKM);
+ if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncRecordAdd_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+SyncRecordAdd_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ {
+ /* Unreference the previously looked up handle */
+ if(pshServerSyncPrimBlockInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hhServerSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ if (pshRecordInt)
+ {
+ PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, PVRSRVBridgeSyncRecordRemoveByHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, PVRSRVBridgeSyncRecordAdd,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNCTRACKING functions from services
+ */
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common bridge header for timerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for timerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_TIMERQUERY_BRIDGE_H
+#define COMMON_TIMERQUERY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3)
+
+
+/*******************************************
+ RGXBeginTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+ IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+
+/*******************************************
+ RGXEndTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+
+/*******************************************
+ RGXQueryTimer
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+ IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+ IMG_UINT64 ui64StartTime;
+ IMG_UINT64 ui64EndTime;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+
+/*******************************************
+ RGXCurrentTime
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+ IMG_UINT64 ui64Time;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+
+#endif /* COMMON_TIMERQUERY_BRIDGE_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Server bridge for timerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for timerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+
+#include "common_timerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN,
+ PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXBeginTimerQueryOUT->eError =
+ PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevData(psConnection),
+ psRGXBeginTimerQueryIN->ui32QueryId);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN,
+ PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
+
+
+
+
+
+ psRGXEndTimerQueryOUT->eError =
+ PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevData(psConnection));
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN,
+ PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXQueryTimerOUT->eError =
+ PVRSRVRGXQueryTimerKM(psConnection, OSGetDevData(psConnection),
+ psRGXQueryTimerIN->ui32QueryId,
+ &psRGXQueryTimerOUT->ui64StartTime,
+ &psRGXQueryTimerOUT->ui64EndTime);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN,
+ PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+
+
+
+
+ psRGXCurrentTimeOUT->eError =
+ PVRSRVRGXCurrentTime(psConnection, OSGetDevData(psConnection),
+ &psRGXCurrentTimeOUT->ui64Time);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+
+/*
+ * Register all TIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitTIMERQUERYBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY, PVRSRVBridgeRGXBeginTimerQuery,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY, PVRSRVBridgeRGXEndTimerQuery,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME, PVRSRVBridgeRGXCurrentTime,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all TIMERQUERY functions from services
+ */
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void)
+{
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide resource handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
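+
+/*
+ * Illustrative sketch (see HandleListIterate() below for the real code):
+ * a subhandle list is walked by handle value rather than by pointer, e.g.
+ *
+ *     IMG_HANDLE hCur = psParentData->sChildren.hNext;
+ *     while (hCur != psParentData->hHandle)
+ *     {
+ *         HANDLE_DATA *psCur;   (obtained via GetHandleData() for hCur)
+ *         ...
+ *         hCur = psCur->sSiblings.hNext;
+ *     }
+ *
+ * Because only handles are stored in the list structures, the back-end may
+ * relocate HANDLE_DATA structures without any pointer fix-ups.
+ */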
+
+#include <stddef.h>
+
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#define HANDLE_HASH_TAB_INIT_SIZE 32
+
+#define SET_FLAG(v, f) ((void)((v) |= (f)))
+#define CLEAR_FLAG(v, f) ((void)((v) &= (IMG_UINT)~(f)))
+#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
+
+#define TEST_ALLOC_FLAG(psHandleData, f) TEST_FLAG((psHandleData)->eFlag, f)
+
+#if !defined(ARRAY_SIZE)
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+ IMG_HANDLE hPrev;
+ IMG_HANDLE hNext;
+ IMG_HANDLE hParent;
+} HANDLE_LIST;
+
+typedef struct _HANDLE_DATA_
+{
+ /* The handle that represents this structure */
+ IMG_HANDLE hHandle;
+
+ /* Handle type */
+ PVRSRV_HANDLE_TYPE eType;
+
+ /* Flags specified when the handle was allocated */
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+ /* Pointer to the data that the handle represents */
+ void *pvData;
+
+ /*
+ * Callback specified at handle allocation time to
+ * release/destroy/free the data represented by the
+ * handle when its reference count reaches 0. This
+ * should always be NULL for subhandles.
+ */
+ PFN_HANDLE_RELEASE pfnReleaseData;
+
+ /* List head for subhandles of this handle */
+ HANDLE_LIST sChildren;
+
+ /* List entry for sibling subhandles */
+ HANDLE_LIST sSiblings;
+
+ /* Reference count. The pfnReleaseData callback gets called when the
+ * reference count hits zero
+ */
+ IMG_UINT32 ui32RefCount;
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+ /* Pointer to a handle implementation's base structure */
+ HANDLE_IMPL_BASE *psImplBase;
+
+ /*
+ * Pointer to handle hash table.
+ * The hash table is used to do reverse lookups, converting data
+ * pointers to handles.
+ */
+ HASH_TABLE *psHashTab;
+
+ /* Can be connection, process, global */
+ PVRSRV_HANDLE_BASE_TYPE eType;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or
+ * NULL if there is no parent). The eHandKey enumeration gives the
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+ HAND_KEY_DATA = 0,
+ HAND_KEY_TYPE,
+ HAND_KEY_PARENT,
+ HAND_KEY_LEN /* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef uintptr_t HAND_KEY[HAND_KEY_LEN];
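+
+/*
+ * Illustrative example: a top-level handle is keyed as
+ * { (uintptr_t)pvData, (uintptr_t)eType, 0 } (no parent), whereas a
+ * subhandle carries its parent handle in the third element, so the same
+ * data/type pair may be hashed once per parent. See InitKey() below.
+ */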
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs = NULL;
+
+/*
+ * Global lock that serialises calls into the handle manager, so that its
+ * functions do not have to be called from a single-threaded context.
+ */
+static POS_LOCK gHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+
+void LockHandle(void)
+{
+ OSLockAcquire(gHandleLock);
+}
+
+void UnlockHandle(void)
+{
+ OSLockRelease(gHandleLock);
+}
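+
+/*
+ * Typical usage, as generated in the bridge entry points above: take the
+ * lock around a lookup, release it before the bridged call, then re-take
+ * it to release the looked-up handles, e.g.
+ *
+ *     LockHandle();
+ *     PVRSRVLookupHandleUnlocked(...);
+ *     UnlockHandle();
+ *     ... bridged call ...
+ *     LockHandle();
+ *     PVRSRVReleaseHandleUnlocked(...);
+ *     UnlockHandle();
+ */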
+
+/*
+ * Kernel handle base structure. This is used for handles that are not
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
+
+/* Increase the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the increment
+ */
+static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData)
+{
+#if defined PVRSRV_DEBUG_HANDLE_LOCK
+ if(!OSLockIsLocked(gHandleLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+ OSDumpStack();
+ }
+#endif
+ psHandleData->ui32RefCount++;
+ return psHandleData->ui32RefCount;
+}
+
+/* Decrease the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the decrement
+ */
+static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData)
+{
+#if defined PVRSRV_DEBUG_HANDLE_LOCK
+ if(!OSLockIsLocked(gHandleLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+ OSDumpStack();
+ }
+#endif
+ PVR_ASSERT(psHandleData->ui32RefCount > 0);
+ psHandleData->ui32RefCount--;
+
+ return psHandleData->ui32RefCount;
+}
+
+/*!
+******************************************************************************
+
+ @Function GetHandleData
+
+ @Description Get the handle data structure for a given handle
+
+ @Input psBase - pointer to handle base structure
+ ppsHandleData - location to return pointer to handle data structure
+ hHandle - handle from client
+ eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the
+ handle type is not to be checked.
+
+ @Output ppsHandleData - points to a pointer to the handle data structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA **ppsHandleData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ HANDLE_DATA *psHandleData;
+ PVRSRV_ERROR eError;
+
+ eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase,
+ hHandle,
+ (void **)&psHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /*
+ * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+ * check handle is of the correct type.
+ */
+ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "GetHandleData: Handle type mismatch (%d != %d)",
+ eType, psHandleData->eType));
+ return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+ }
+
+ /* Return the handle structure */
+ *ppsHandleData = psHandleData;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListInit
+
+ @Description Initialise a linked list structure embedded in a handle
+ structure.
+
+ @Input hHandle - handle containing the linked list structure
+ psList - pointer to linked list structure
+ hParent - parent handle or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+ psList->hPrev = hHandle;
+ psList->hNext = hHandle;
+ psList->hParent = hParent;
+}
+
+/*!
+******************************************************************************
+
+ @Function InitParentList
+
+ @Description Initialise the children list head in a handle structure.
+ The children are the subhandles of this handle.
+
+ @Input psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+ IMG_HANDLE hParent = psHandleData->hHandle;
+
+ HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function InitChildEntry
+
+ @Description Initialise the child list entry in a handle structure.
+ The list entry is used to link together subhandles of
+ a given handle.
+
+ @Input psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+ HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListIsEmpty
+
+ @Description Determine whether a given linked list is empty.
+
+ @Input hHandle - handle containing the list head
+ psList - pointer to the list head
+
+ @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */
+{
+ IMG_BOOL bIsEmpty;
+
+ bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef DEBUG
+ {
+ IMG_BOOL bIsEmpty2;
+
+ bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+ PVR_ASSERT(bIsEmpty == bIsEmpty2);
+ }
+#endif
+
+ return bIsEmpty;
+}
+
+#ifdef DEBUG
+/*!
+******************************************************************************
+
+ @Function NoChildren
+
+ @Description Determine whether a handle has any subhandles
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+ PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+ return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+******************************************************************************
+
+ @Function NoParent
+
+ @Description Determine whether a handle is a subhandle
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+ if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+ {
+ PVR_ASSERT(psHandleData->sSiblings.hParent == NULL);
+
+ return IMG_TRUE;
+ }
+ else
+ {
+ PVR_ASSERT(psHandleData->sSiblings.hParent != NULL);
+ }
+ return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+******************************************************************************
+
+ @Function ParentHandle
+
+ @Description Determine the parent of a handle
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return Parent handle, or NULL if the handle is not a subhandle.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+ return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list. The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle. The parent field
+ * in the list head structure references the handle structure that contains
+ * it. For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ IMG_HANDLE hParent,
+ size_t uiParentOffset,
+ size_t uiEntryOffset)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psBase != NULL);
+
+ eError = GetHandleData(psBase,
+ &psHandleData,
+ hEntry,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ return NULL;
+ }
+
+ if (hEntry == hParent)
+ {
+ return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiParentOffset);
+ }
+ else
+ {
+ return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiEntryOffset);
+ }
+}
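+
+/*
+ * Illustrative example: HandleListRemove() and HandleListIterate() below
+ * call this with uiParentOffset = offsetof(HANDLE_DATA, sChildren) and
+ * uiEntryOffset = offsetof(HANDLE_DATA, sSiblings), so the same walk code
+ * can step over both the list head (hEntry == hParent) and ordinary entries.
+ */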
+
+/*!
+******************************************************************************
+
+ @Function HandleListInsertBefore
+
+ @Description Insert a handle before a handle currently on the list.
+
+ @Input hEntry - existing list entry; the new entry is inserted before it
+ psEntry - pointer to the list structure of hEntry
+ uiParentOffset - offset to list head struct in handle structure
+ hNewEntry - handle to be inserted
+ psNewEntry - pointer to handle structure of item to be inserted
+ uiEntryOffset - offset of list item struct in handle structure
+ hParent - parent handle of hNewEntry
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ HANDLE_LIST *psEntry,
+ size_t uiParentOffset,
+ IMG_HANDLE hNewEntry,
+ HANDLE_LIST *psNewEntry,
+ size_t uiEntryOffset,
+ IMG_HANDLE hParent)
+{
+ HANDLE_LIST *psPrevEntry;
+
+ if (psBase == NULL || psEntry == NULL || psNewEntry == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psPrevEntry = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hPrev,
+ hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psPrevEntry == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ PVR_ASSERT(psNewEntry->hParent == NULL);
+ PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+ {
+ HANDLE_LIST *psParentList;
+
+ psParentList = GetHandleListFromHandleAndOffset(psBase,
+ hParent,
+ hParent,
+ uiParentOffset,
+ uiParentOffset);
+ PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+ }
+#endif /* defined(DEBUG) */
+
+ psNewEntry->hPrev = psEntry->hPrev;
+ psEntry->hPrev = hNewEntry;
+
+ psNewEntry->hNext = hEntry;
+ psPrevEntry->hNext = hNewEntry;
+
+ psNewEntry->hParent = hParent;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function AdoptChild
+
+ @Description Assign a subhandle to a handle
+
+ @Input psParentData - pointer to handle structure of parent handle
+ psChildData - pointer to handle structure of child subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psParentData,
+ HANDLE_DATA *psChildData)
+{
+ IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+ PVR_ASSERT(hParent == psParentData->hHandle);
+
+ return HandleListInsertBefore(psBase,
+ hParent,
+ &psParentData->sChildren,
+ offsetof(HANDLE_DATA, sChildren),
+ psChildData->hHandle,
+ &psChildData->sSiblings,
+ offsetof(HANDLE_DATA, sSiblings),
+ hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListRemove
+
+ @Description Remove a handle from a list
+
+ @Input hEntry - handle to be removed
+ psEntry - pointer to handle structure of item to be removed
+ uiEntryOffset - offset of list item struct in handle structure
+ uiParentOffset - offset to list head struct in handle structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ HANDLE_LIST *psEntry,
+ size_t uiEntryOffset,
+ size_t uiParentOffset)
+{
+ if (psBase == NULL || psEntry == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!HandleListIsEmpty(hEntry, psEntry))
+ {
+ HANDLE_LIST *psPrev;
+ HANDLE_LIST *psNext;
+
+ psPrev = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hPrev,
+ psEntry->hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psPrev == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ psNext = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hNext,
+ psEntry->hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psNext == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ /*
+ * The list head is on the list, and we don't want to
+ * remove it.
+ */
+ PVR_ASSERT(psEntry->hParent != NULL);
+
+ psPrev->hNext = psEntry->hNext;
+ psNext->hPrev = psEntry->hPrev;
+
+ HandleListInit(hEntry, psEntry, NULL);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function UnlinkFromParent
+
+ @Description Remove a subhandle from its parent's list
+
+ @Input psHandleData - pointer to handle data structure of child subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psHandleData)
+{
+ return HandleListRemove(psBase,
+ psHandleData->hHandle,
+ &psHandleData->sSiblings,
+ offsetof(HANDLE_DATA, sSiblings),
+ offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListIterate
+
+ @Description Iterate over the items in a list
+
+ @Input psHead - pointer to list head
+ uiParentOffset - offset to list head struct in handle structure
+ uiEntryOffset - offset of list item struct in handle structure
+ pfnIterFunc - function to be called for each handle in the list
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_LIST *psHead,
+ size_t uiParentOffset,
+ size_t uiEntryOffset,
+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+ IMG_HANDLE hHandle = psHead->hNext;
+ IMG_HANDLE hParent = psHead->hParent;
+ IMG_HANDLE hNext;
+
+ PVR_ASSERT(psHead->hParent != NULL);
+
+ /*
+ * Follow the next chain from the list head until we reach
+ * the list head again, which signifies the end of the list.
+ */
+ while (hHandle != hParent)
+ {
+ HANDLE_LIST *psEntry;
+ PVRSRV_ERROR eError;
+
+ psEntry = GetHandleListFromHandleAndOffset(psBase,
+ hHandle,
+ hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psEntry == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+ /*
+ * Get the next handle now, in case the list item is
+ * modified by the iteration function.
+ */
+ hNext = psEntry->hNext;
+
+ eError = (*pfnIterFunc)(psBase, hHandle);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ hHandle = hNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function IterateOverChildren
+
+ @Description Iterate over the subhandles of a parent handle
+
+ @Input psParentData - pointer to parent handle structure
+ pfnIterFunc - function to be called for each subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psParentData,
+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+ return HandleListIterate(psBase,
+ &psParentData->sChildren,
+ offsetof(HANDLE_DATA, sChildren),
+ offsetof(HANDLE_DATA, sSiblings),
+ pfnIterFunc);
+}
+
+/*!
+******************************************************************************
+
+ @Function ParentIfPrivate
+
+ @Description Return the parent handle if the handle was allocated
+ with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return
+ NULL
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return Parent handle, or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+ return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+ ParentHandle(psHandleData) : NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function InitKey
+
+ @Description Initialise a hash table key for a resource
+
+ @Input aKey - pointer to the key to be initialised
+ psBase - pointer to handle base structure (currently unused)
+ pvData - pointer to the resource the handle represents
+ eType - type of resource
+ hParent - parent handle, or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+ PVRSRV_HANDLE_BASE *psBase,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hParent)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ aKey[HAND_KEY_DATA] = (uintptr_t)pvData;
+ aKey[HAND_KEY_TYPE] = (uintptr_t)eType;
+ aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+}
+
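+/*
+ * Forward declaration: FreeHandle() frees a handle's subhandles by passing
+ * FreeHandleWrapper() to IterateOverChildren(), so the two functions are
+ * mutually recursive.
+ */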
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle);
+
+/*!
+******************************************************************************
+
+ @Function FreeHandle
+
+ @Description Free a handle data structure.
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle to be freed
+ eType - Type of the handle to be freed
+ ppvData - Location for data associated with the freed handle
+
+ @Output ppvData - Points to data that was associated with the freed handle
+
+ @Return PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ void **ppvData)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ HANDLE_DATA *psReleasedHandleData;
+ PVRSRV_ERROR eError;
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (_HandleUnref(psHandleData) > 0)
+ {
+ /* this handle still has references so do not destroy it
+ * or the underlying object yet
+ */
+ return PVRSRV_OK;
+ }
+
+ /* No references remain, so release the underlying resource via the release data callback (if any) */
+ if (psHandleData->pfnReleaseData != NULL)
+ {
+ eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeHandle: "
+ "Got retry while calling release data callback for %p (type = %d)",
+ hHandle,
+ (IMG_UINT32)psHandleData->eType));
+
+ /* the caller should retry, so retain a reference on the handle */
+ _HandleRef(psHandleData);
+
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+ IMG_HANDLE hRemovedHandle;
+
+ InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData));
+
+ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+ PVR_ASSERT(hRemovedHandle != NULL);
+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+ PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+ }
+
+ eError = UnlinkFromParent(psBase, psHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "FreeHandle: Error whilst unlinking from parent handle (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* Free children */
+ eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "FreeHandle: Error whilst freeing subhandles (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+ psHandleData->hHandle,
+ (void **)&psReleasedHandleData);
+ if (eError == PVRSRV_OK)
+ {
+ PVR_ASSERT(psReleasedHandleData == psHandleData);
+ }
+
+ if (ppvData)
+ {
+ *ppvData = psHandleData->pvData;
+ }
+
+ OSFreeMem(psHandleData);
+
+ return eError;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle)
+{
+ return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function FindHandle
+
+ @Description Find handle corresponding to a resource pointer
+
+ @Input psBase - pointer to handle base structure
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Return the handle, or NULL if not found
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hParent)
+{
+ HAND_KEY aKey;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+******************************************************************************
+
+ @Function AllocHandle
+
+ @Description Allocate a new handle
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ hParent - parent handle or NULL
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ HANDLE_DATA *psNewHandleData;
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ /* Handle must not already exist */
+ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
+ }
+
+ psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+ if (psNewHandleData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't allocate handle data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, psNewHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Failed to acquire a handle"));
+ goto ErrorFreeHandleData;
+ }
+
+ /*
+ * If a data pointer can be associated with multiple handles, we
+ * don't put the handle in the hash table, as the data pointer
+ * may not map to a unique handle
+ */
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+
+ /* Initialise hash key */
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+ /* Put the new handle in the hash table */
+ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
+ eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+ goto ErrorReleaseHandle;
+ }
+ }
+
+ psNewHandleData->hHandle = hHandle;
+ psNewHandleData->eType = eType;
+ psNewHandleData->eFlag = eFlag;
+ psNewHandleData->pvData = pvData;
+ psNewHandleData->pfnReleaseData = pfnReleaseData;
+ psNewHandleData->ui32RefCount = 1;
+
+ InitParentList(psNewHandleData);
+#if defined(DEBUG)
+ PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+ InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+ PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+ /* Return the new handle to the client */
+ *phHandle = psNewHandleData->hHandle;
+
+ return PVRSRV_OK;
+
+ErrorReleaseHandle:
+ (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL);
+
+ErrorFreeHandleData:
+ OSFreeMem(psNewHandleData);
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandle
+
+ @Description Allocate a handle
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandleUnlocked
+
+ @Description Allocate a handle without acquiring/releasing the handle
+ lock. The function assumes you hold the lock when called.
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ PVRSRV_ERROR eError;
+
+ *phHandle = NULL;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ if (pfnReleaseData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing release function"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData);
+
+Exit:
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocSubHandle
+
+ @Description Allocate a subhandle
+
+ @Input phHandle - location for new subhandle
+ pvData - pointer to resource to be associated with the subhandle
+ eType - the type of resource
+ hParent - parent handle
+
+ @Output phHandle - points to new subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocSubHandleUnlocked
+
+ @Description Allocate a subhandle without acquiring/releasing the
+ handle lock. The function assumes you hold the lock when called.
+
+ @Input phHandle - location for new subhandle
+ pvData - pointer to resource to be associated with the subhandle
+ eType - the type of resource
+ hParent - parent handle
+
+ @Output phHandle - points to new subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent)
+{
+ HANDLE_DATA *psPHandleData = NULL;
+ HANDLE_DATA *psCHandleData = NULL;
+ IMG_HANDLE hParentKey;
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ *phHandle = NULL;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
+
+ /* Lookup the parent handle */
+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+ goto Exit;
+ }
+
+ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+ eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+ /* If we were able to allocate the handle then there should be no reason why we
+ can't also get it's handle structure. Otherwise something has gone badly wrong. */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ goto Exit;
+ }
+
+ /*
+ * Get the parent handle structure again, in case the handle
+ * structure has moved (depending on the implementation
+ * of AllocHandle).
+ */
+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+ (void)FreeHandle(psBase, hHandle, eType, NULL);
+ goto Exit;
+ }
+
+ eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Parent handle failed to adopt subhandle"));
+
+ (void)FreeHandle(psBase, hHandle, eType, NULL);
+ goto Exit;
+ }
+
+ *phHandle = hHandle;
+
+ eError = PVRSRV_OK;
+
+Exit:
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFindHandle
+
+ @Description Find handle corresponding to a resource pointer
+
+ @Input phHandle - location for returned handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Output phHandle - points to handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFindHandleUnlocked
+
+ @Description Find handle corresponding to a resource pointer without
+ acquiring/releasing the handle lock. The function assumes you hold
+ the lock when called.
+
+ @Input phHandle - location for returned handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Output phHandle - points to handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ /* See if there is a handle for this data pointer */
+ hHandle = FindHandle(psBase, pvData, eType, NULL);
+ if (hHandle == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVFindHandle: Error finding handle. Type %u",
+ eType));
+
+ eError = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto Exit;
+ }
+
+ *phHandle = hHandle;
+
+ eError = PVRSRV_OK;
+
+Exit:
+ return eError;
+
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupHandle
+
+ @Description Lookup the data pointer corresponding to a handle
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ bRef - If TRUE, a reference will be added on the handle if the
+ lookup is successful.
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_BOOL bRef)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupHandleUnlocked
+
+ @Description Lookup the data pointer corresponding to a handle without
+ acquiring/releasing the handle lock. The function assumes you
+ hold the lock when called.
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ bRef - If TRUE, a reference will be added on the handle if the
+ lookup is successful.
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_BOOL bRef)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVLookupHandle: Error looking up handle (%s). Handle %p, type %u",
+ PVRSRVGetErrorStringKM(eError),
+ (void*) hHandle,
+ eType));
+#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF)
+ OSDumpStack();
+#endif
+ goto Exit;
+ }
+
+ if (bRef)
+ {
+ _HandleRef(psHandleData);
+ }
+
+ *ppvData = psHandleData->pvData;
+
+ eError = PVRSRV_OK;
+
+Exit:
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupSubHandle
+
+ @Description Lookup the data pointer corresponding to a subhandle
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ hAncestor - ancestor handle
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hAncestor)
+{
+ HANDLE_DATA *psPHandleData = NULL;
+ HANDLE_DATA *psCHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVLookupSubHandle: Error looking up subhandle (%s). Handle %p, type %u",
+ PVRSRVGetErrorStringKM(eError),
+ (void*) hHandle,
+ eType));
+ OSDumpStack();
+ goto ExitUnlock;
+ }
+
+ /* Look for hAncestor among the handle's ancestors */
+ for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+ {
+ eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
+ eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+ goto ExitUnlock;
+ }
+ }
+
+ *ppvData = psCHandleData->pvData;
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetParentHandle
+
+ @Description Lookup the parent of a handle
+
+ @Input phParent - location for returning parent handle
+ hHandle - handle for which the parent handle is required
+ eType - handle type
+
+ @Output *phParent - parent handle, or NULL if there is no parent
+
+ @Return Error code or PVRSRV_OK. Note that not having a parent is
+ not regarded as an error.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phParent,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVGetParentHandle: Error looking up subhandle (%s). Type %u",
+ PVRSRVGetErrorStringKM(eError),
+ eType));
+ OSDumpStack();
+ goto ExitUnlock;
+ }
+
+ *phParent = ParentHandle(psHandleData);
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVReleaseHandle
+
+ @Description Release a handle that is no longer needed
+
+ @Input hHandle - handle from client
+ eType - handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
+ UnlockHandle();
+
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVReleaseHandleUnlocked
+
+ @Description Release a handle that is no longer needed without
+ acquiring/releasing the handle lock. The function assumes you
+ hold the lock when called.
+
+ @Input hHandle - handle from client
+ eType - handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = FreeHandle(psBase, hHandle, eType, NULL);
+
+Exit:
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPurgeHandles
+
+ @Description Purge handles for a given handle base
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandleBase
+
+ @Description Allocate a handle base structure for a process
+
+ @Input ppsBase - pointer to handle base structure pointer
+
+ @Output ppsBase - points to handle base structure pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+ PVRSRV_HANDLE_BASE_TYPE eType)
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ PVRSRV_ERROR eError;
+
+ if (gpsHandleFuncs == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Handle management not initialised"));
+ return PVRSRV_ERROR_NOT_READY;
+ }
+
+ LockHandle();
+
+ if (ppsBase == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrorUnlock;
+ }
+
+ psBase = OSAllocZMem(sizeof(*psBase));
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorUnlock;
+ }
+
+ psBase->eType = eType;
+
+ eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeHandleBase;
+ }
+
+ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
+ sizeof(HAND_KEY),
+ HASH_Func_Default,
+ HASH_Key_Comp_Default);
+ if (psBase->psHashTab == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ goto ErrorDestroyHandleBase;
+ }
+
+ *ppsBase = psBase;
+
+ UnlockHandle();
+
+ return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+ (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorFreeHandleBase:
+ OSFreeMem(psBase);
+
+ErrorUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+ COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psData == NULL ||
+ psData->psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Missing free data"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psData->psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Couldn't get handle data for handle"));
+ return eError;
+ }
+
+ if (psHandleData != NULL)
+ {
+ psData->uiHandleDataCount++;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* Print a handle in the handle base. Used with the iterator callback. */
+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
+{
+ PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Missing base", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get handle data for handle", __func__));
+ return eError;
+ }
+
+ if (psHandleData != NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, " Handle: %6u, Type: %3u, Refs: %3u",
+ (IMG_UINT32) (uintptr_t) psHandleData->hHandle,
+ psHandleData->eType,
+ psHandleData->ui32RefCount));
+
+ }
+
+ return PVRSRV_OK;
+}
+
+#endif /* defined(DEBUG) */
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ PVRSRV_HANDLE_TYPE eHandleFreeType;
+ /* timing data (ns) to release bridge lock upon the deadline */
+ IMG_UINT64 ui64TimeStart;
+ IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
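+/* Returns IMG_TRUE once at least ui64MaxBridgeTime nanoseconds have elapsed
+ * since ui64TimeStart, taking clock wrap-around into account. */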
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+ IMG_UINT64 ui64Diff;
+ IMG_UINT64 ui64Now = OSClockns64();
+
+ if (ui64Now >= ui64TimeStart)
+ {
+ ui64Diff = ui64Now - ui64TimeStart;
+ }
+ else
+ {
+ /* time has wrapped around */
+ ui64Diff = (0xFFFFFFFFFFFFFFFF - ui64TimeStart) + ui64Now;
+ }
+
+ return ui64Diff >= ui64MaxBridgeTime;
+}
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+ FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psData == NULL ||
+ psData->psBase == NULL ||
+ psData->eHandleFreeType == PVRSRV_HANDLE_TYPE_NONE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Missing free data"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psData->psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Couldn't get handle data for handle"));
+ return eError;
+ }
+
+ if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_ASSERT(psHandleData->ui32RefCount > 0);
+
+ while (psHandleData->ui32RefCount != 0)
+ {
+ if (psHandleData->pfnReleaseData != NULL)
+ {
+ eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeHandleDataWrapper: "
+ "Got retry while calling release data callback for %p (type = %d)",
+ hHandle,
+ (IMG_UINT32)psHandleData->eType));
+
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ _HandleUnref(psHandleData);
+ }
+
+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+ IMG_HANDLE hRemovedHandle;
+
+ InitKey(aKey,
+ psData->psBase,
+ psHandleData->pvData,
+ psHandleData->eType,
+ ParentIfPrivate(psHandleData));
+
+ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+ PVR_ASSERT(hRemovedHandle != NULL);
+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+ PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+ }
+
+ eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSFreeMem(psHandleData);
+
+ /* If we have reached the end of the time slice, release the global
+ * lock, invoke the scheduler and then reacquire the lock */
+ if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock timeout (timeout: %llu)",
+ psData->ui64MaxBridgeTime));
+ UnlockHandle();
+ OSReleaseBridgeLock();
+ /* Invoke the scheduler to check if other processes are waiting for the lock */
+ OSReleaseThreadQuanta();
+ OSAcquireBridgeLock();
+ LockHandle();
+ /* Restart the time slice from now */
+ psData->ui64TimeStart = OSClockns64();
+ PVR_DPF((PVR_DBG_MESSAGE, "FreeHandleDataWrapper: Lock acquired again"));
+ }
+
+ return PVRSRV_OK;
+}
+
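+/*
+ * Handle types are freed in the order given below so that handles for
+ * dependent resources (e.g. mappings and reservations) are released before
+ * the handles they depend on (e.g. heaps and contexts).
+ */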
+static PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+ PVRSRV_HANDLE_TYPE_DC_BUFFER,
+ PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_DC_DEVICE,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFreeHandleBase
+
+ @Description Free a handle base structure
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+ COUNT_HANDLE_DATA sCountData = { 0 };
+#endif
+ FREE_HANDLE_DATA sHandleData = { 0 };
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ sHandleData.psBase = psBase;
+ sHandleData.ui64TimeStart = OSClockns64();
+ sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+#if defined(DEBUG)
+
+ sCountData.psBase = psBase;
+
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &CountHandleDataWrapper,
+ (void *)&sCountData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVFreeHandleBase: Failed to perform handle count (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ExitUnlock;
+ }
+
+ if (sCountData.uiHandleDataCount != 0)
+ {
+ IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: %u remaining handles in handle base 0x%p "
+ "(PVRSRV_HANDLE_BASE_TYPE %u). %s",
+ __func__,
+ sCountData.uiHandleDataCount,
+ psBase,
+ psBase->eType,
+ bList ? "Check handle.h for a type reference":
+ "Skipping details, too many items..."));
+
+ if (bList)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &ListHandlesInBase,
+ psBase);
+ PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing --------"));
+ }
+ }
+
+#endif /* defined(DEBUG) */
+
+ /*
+ * As we're freeing handles based on type, make sure all
+ * handles have actually had their data freed to avoid
+ * resources being leaked
+ */
+ for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+ {
+ sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+ /* Make sure all handles have been freed before destroying the handle base */
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &FreeHandleDataWrapper,
+ (void *)&sHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ goto ExitUnlock;
+ }
+ }
+
+ if (psBase->psHashTab != NULL)
+ {
+ HASH_Delete(psBase->psHashTab);
+ }
+
+ eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ goto ExitUnlock;
+ }
+
+ OSFreeMem(psBase);
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVHandleInit
+
+ @Description Initialise handle management
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsKernelHandleBase == NULL);
+ PVR_ASSERT(gpsHandleFuncs == NULL);
+ PVR_ASSERT(!gbLockInitialised);
+
+ eError = OSLockCreate(&gHandleLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: Creation of handle global lock failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+ gbLockInitialised = IMG_TRUE;
+
+ eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVHandleGetFuncTable failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ return PVRSRV_OK;
+
+ErrorHandleDeinit:
+ (void) PVRSRVHandleDeInit();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVHandleDeInit
+
+ @Description De-initialise handle management
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (gpsHandleFuncs != NULL)
+ {
+ if (gpsKernelHandleBase != NULL)
+ {
+ eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+ if (eError == PVRSRV_OK)
+ {
+ gpsKernelHandleBase = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleDeInit: FreeHandleBase failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+ gpsHandleFuncs = NULL;
+ }
+ }
+ else
+ {
+ /* If we don't have a handle function table we shouldn't have a handle base either */
+ PVR_ASSERT(gpsKernelHandleBase == NULL);
+ }
+
+ if (gbLockInitialised)
+ {
+ OSLockDestroy(gHandleLock);
+ gbLockInitialised = IMG_FALSE;
+ }
+
+ return eError;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Handle Manager API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_H__)
+#define __HANDLE_H__
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources,
+ * which can then be passed back to user space processes.
+ *
+ * The following functions comprise the API. Each function takes a
+ * pointer to a PVRSRV_HANDLE_BASE structure, one of which is allocated
+ * for each process, and stored in the per-process data area. Use
+ * KERNEL_HANDLE_BASE for handles not allocated for a particular process,
+ * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE
+ * structure for the process is available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding
+ * resource is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If a particular resource may be referenced multiple times by a
+ * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ * will allow multiple handles to be allocated for the resource.
+ * Such handles cannot be found with PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously. Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles. For example, they may
+ * have subhandles of their own, and may be explicitly deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by
+ * pvData, of type eType. Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this
+ * function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * IMG_BOOL bRef);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource. If bRef is IMG_TRUE, an additional reference is taken on the
+ * handle when the lookup succeeds.
+ *
+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * IMG_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ *
+ * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Return the parent of a handle in *phParent, or NULL if the handle has
+ * no parent.
+ */
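+
+/*
+ * Illustrative sketch only (not part of this header): typical allocation,
+ * lookup and release of a handle, assuming a hypothetical per-connection
+ * handle base psConnection->psHandleBase, a resource type MY_RESOURCE and
+ * a release callback MyResourceDestroy() supplied by the caller.
+ *
+ *     IMG_HANDLE hRes;
+ *     MY_RESOURCE *psRes = MyResourceCreate();
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = PVRSRVAllocHandle(psConnection->psHandleBase, &hRes, psRes,
+ *                                PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *                                MyResourceDestroy);
+ *
+ *     // ... later, convert the handle back into the resource pointer ...
+ *     eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+ *                                 (void **)&psRes, hRes,
+ *                                 PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_FALSE);
+ *
+ *     // ... and finally drop the handle, which invokes MyResourceDestroy()
+ *     eError = PVRSRVReleaseHandle(psConnection->psHandleBase, hRes,
+ *                                  PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ */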
+
+#include "img_types.h"
+#include "hash.h"
+
+typedef enum
+{
+ PVRSRV_HANDLE_TYPE_NONE = 0,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_TYPE_DC_DEVICE,
+ PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_DC_BUFFER,
+ PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP,
+ PVRSRV_HANDLE_TYPE_WORKEST_RETURN_DATA
+} PVRSRV_HANDLE_TYPE;
+
+typedef enum
+{
+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION,
+ PVRSRV_HANDLE_BASE_TYPE_PROCESS,
+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL
+} PVRSRV_HANDLE_BASE_TYPE;
+
+
+typedef enum
+{
+ /* No flags */
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
+ /* Multiple handles can point at the given data pointer */
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01,
+ /* Subhandles are allocated in a private handle space */
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+typedef struct _PROCESS_HANDLE_BASE_
+{
+ PVRSRV_HANDLE_BASE *psHandleBase;
+ ATOMIC_T iRefCount;
+
+} PROCESS_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#define HANDLE_DEBUG_LISTING_MAX_NUM 20
+
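+/*
+ * Resource release callback, supplied to PVRSRVAllocHandle() and invoked
+ * when the last reference on the handle is dropped. Returning
+ * PVRSRV_ERROR_RETRY defers destruction so that the caller can retry.
+ */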
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+ PVRSRV_HANDLE_BASE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+void LockHandle(void);
+void UnlockHandle(void);
+
+
+#endif /* !defined(__HANDLE_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager - IDR Back-end
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide IDR based resource handle management back-end
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN 1
+#define ID_VALUE_MAX INT_MAX
+
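+/*
+ * A handle in this back-end is simply the IDR id cast to a pointer-sized
+ * value; the macros below convert between the two representations.
+ */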
+#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i))
+#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h))
+
+struct _HANDLE_IMPL_BASE_
+{
+ struct idr sIdr;
+
+ IMG_UINT32 ui32MaxHandleValue;
+
+ IMG_UINT32 ui32TotalHandCount;
+};
+
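+/*
+ * Wraps a PFN_HANDLE_ITER callback and its private data so that they can be
+ * passed through idr_for_each() via HandleIterFuncWrapper() below.
+ */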
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+ PFN_HANDLE_ITER pfnHandleIter;
+ void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+ HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+ PVR_UNREFERENCED_PARAMETER(data);
+
+ return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function AcquireHandle
+
+ @Description Acquire a new handle
+
+ @Input psBase - Pointer to handle base structure
+ phHandle - Points to a handle pointer
+ pvData - Pointer to resource to be associated with the handle
+
+ @Output phHandle - Points to a handle pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData)
+{
+ int id;
+ int result;
+
+ PVR_ASSERT(psBase != NULL);
+ PVR_ASSERT(phHandle != NULL);
+ PVR_ASSERT(pvData != NULL);
+
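+ /*
+ * idr_alloc() was introduced in Linux 3.9; on older kernels fall back to
+ * the two-step idr_pre_get()/idr_get_new_above() interface and enforce
+ * the maximum handle value by hand.
+ */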
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ idr_preload(GFP_KERNEL);
+ id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+ idr_preload_end();
+
+ result = id;
+#else
+ do
+ {
+ if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+ } while (result == -EAGAIN);
+
+ if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+ {
+ idr_remove(&psBase->sIdr, id);
+ result = -ENOSPC;
+ }
+#endif
+
+ if (result < 0)
+ {
+ if (result == -ENOSPC)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached",
+ __func__, psBase->ui32MaxHandleValue));
+
+ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+ }
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psBase->ui32TotalHandCount++;
+
+ *phHandle = ID_TO_HANDLE(id);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function ReleaseHandle
+
+ @Description Release a handle that is no longer needed.
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle to release
+ ppvData - Points to a void data pointer
+
+ @Output ppvData - Points to a void data pointer
+
+ @Return PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void **ppvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvData;
+
+ PVR_ASSERT(psBase);
+
+ /* Get the data associated with the handle. If we get back NULL then
+ it's an invalid handle */
+
+ pvData = idr_find(&psBase->sIdr, id);
+ if (pvData)
+ {
+ idr_remove(&psBase->sIdr, id);
+ psBase->ui32TotalHandCount--;
+ }
+
+ if (pvData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle %u not found (%u handles allocated)",
+ __FUNCTION__, id, psBase->ui32TotalHandCount));
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ if (ppvData)
+ {
+ *ppvData = pvData;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function GetHandleData
+
+ @Description Get the data associated with the given handle
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle from which data should be retrieved
+ ppvData - Points to a void data pointer
+
+ @Output ppvData - Points to a void data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void **ppvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvData;
+
+ PVR_ASSERT(psBase);
+ PVR_ASSERT(ppvData);
+
+ pvData = idr_find(&psBase->sIdr, id);
+ if (pvData)
+ {
+ *ppvData = pvData;
+
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+}
+
+/*!
+******************************************************************************
+
+ @Function SetHandleData
+
+ @Description Set the data associated with the given handle
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle for which data should be changed
+ pvData - Pointer to new data to be associated with the handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void *pvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvOldData;
+
+ PVR_ASSERT(psBase);
+
+ pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+ if (IS_ERR(pvOldData))
+ {
+ if (PTR_ERR(pvOldData) == -ENOENT)
+ {
+ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+ }
+ else
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+ HANDLE_ITER_DATA_WRAPPER sIterData;
+
+ PVR_ASSERT(psBase);
+ PVR_ASSERT(pfnHandleIter);
+
+ sIterData.pfnHandleIter = pfnHandleIter;
+ sIterData.pvHandleIterData = pvHandleIterData;
+
+ return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function EnableHandlePurging
+
+ @Description Enable purging for a given handle base
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_ASSERT(psBase);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PurgeHandles
+
+ @Description Purge handles for a given handle base
+
+ @Input psBase - Pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_ASSERT(psBase);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function CreateHandleBase
+
+ @Description Create a handle base structure
+
+ @Input ppsBase - pointer to handle base structure pointer
+
+ @Output ppsBase - points to handle base structure pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+ HANDLE_IMPL_BASE *psBase;
+
+ PVR_ASSERT(ppsBase);
+
+ psBase = OSAllocZMem(sizeof(*psBase));
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", __FUNCTION__));
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ idr_init(&psBase->sIdr);
+
+ psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+ psBase->ui32TotalHandCount = 0;
+
+ *ppsBase = psBase;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function DestroyHandleBase
+
+ @Description Destroy a handle base structure
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_ASSERT(psBase);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ idr_remove_all(&psBase->sIdr);
+#endif
+
+ /* Finally destroy the idr */
+ idr_destroy(&psBase->sIdr);
+
+ OSFreeMem(psBase);
+
+ return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab =
+{
+ .pfnAcquireHandle = AcquireHandle,
+ .pfnReleaseHandle = ReleaseHandle,
+ .pfnGetHandleData = GetHandleData,
+ .pfnSetHandleData = SetHandleData,
+ .pfnIterateOverHandles = IterateOverHandles,
+ .pfnEnableHandlePurging = EnableHandlePurging,
+ .pfnPurgeHandles = PurgeHandles,
+ .pfnCreateHandleBase = CreateHandleBase,
+ .pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+ static IMG_BOOL bAcquired = IMG_FALSE;
+
+ if (bAcquired)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired",
+ __FUNCTION__));
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ if (ppsFuncs == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *ppsFuncs = &g_sHandleFuncTab;
+
+ bAcquired = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
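+
+#if 0
+/*
+ * Illustrative sketch only (not built): how generic handle-manager code might
+ * drive the function table returned above. The wrapper name
+ * ExampleHandleRoundTrip and its local variables are hypothetical; the pfn*
+ * callbacks and PVRSRVHandleGetFuncTable are the ones defined in this file.
+ */
+static PVRSRV_ERROR ExampleHandleRoundTrip(void *pvResource)
+{
+ HANDLE_IMPL_FUNCTAB const *psFuncs;
+ HANDLE_IMPL_BASE *psBase;
+ IMG_HANDLE hHandle;
+ void *pvData;
+ PVRSRV_ERROR eError;
+
+ /* The function table may only be acquired once per driver instance */
+ eError = PVRSRVHandleGetFuncTable(&psFuncs);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = psFuncs->pfnCreateHandleBase(&psBase);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* Allocate a handle for the resource, look it up, then release it */
+ eError = psFuncs->pfnAcquireHandle(psBase, &hHandle, pvResource);
+ if (eError == PVRSRV_OK)
+ {
+ (void) psFuncs->pfnGetHandleData(psBase, hHandle, &pvData);
+ (void) psFuncs->pfnReleaseHandle(psBase, hHandle, NULL);
+ }
+
+ return psFuncs->pfnDestroyHandleBase(psBase);
+}
+#endif /* 0 */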
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Implementation Callbacks for Handle Manager API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the handle manager API. This file is for declarations
+ and definitions that are private/internal to the handle manager
+ API but need to be shared between the generic handle manager
+ code and the various handle manager backends, i.e. the code that
+ implements the various callbacks.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_IMPL_H__)
+#define __HANDLE_IMPL_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+ /* Acquire a new handle which is associated with the given data */
+ PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+ /* Release the given handle (optionally returning the data associated with it) */
+ PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+ /* Get the data associated with the given handle */
+ PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+ /* Set the data associated with the given handle */
+ PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+ PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+ /* Enable handle purging on the given handle base */
+ PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+ /* Purge handles on the given handle base */
+ PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+ /* Create handle base */
+ PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+ /* Destroy handle base */
+ PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
+
+#endif /* !defined(__HANDLE_IMPL_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Self scaling hash tables.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+ Implements simple self scaling hash tables. Hash collisions are
+ handled by chaining entries together. Hash tables are doubled in
+ size when they become more than 50% full and halved in size when
+ they fall below 25% full. Hash tables are never decreased below
+ their initial size.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define KEY_TO_INDEX(pHash, key, uSize) \
+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define KEY_COMPARE(pHash, pKey1, pKey2) \
+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+/* Each entry in a hash table is placed into a bucket */
+struct _BUCKET_
+{
+ /* the next bucket on the same chain */
+ struct _BUCKET_ *pNext;
+
+ /* entry value */
+ uintptr_t v;
+
+ /* entry key */
+#if defined (WIN32)
+ uintptr_t k[1];
+#else
+ uintptr_t k[]; /* PRQA S 0642 */ /* override dynamic array declaration warning */
+#endif
+};
+typedef struct _BUCKET_ BUCKET;
+
+struct _HASH_TABLE_
+{
+ /* current size of the hash table */
+ IMG_UINT32 uSize;
+
+ /* number of entries currently in the hash table */
+ IMG_UINT32 uCount;
+
+ /* the minimum size that the hash table should be re-sized to */
+ IMG_UINT32 uMinimumSize;
+
+ /* size of key in bytes */
+ IMG_UINT32 uKeySize;
+
+ /* hash function */
+ HASH_FUNC *pfnHashFunc;
+
+ /* key comparison function */
+ HASH_KEY_COMP *pfnKeyComp;
+
+ /* the hash table array */
+ BUCKET **ppBucketTable;
+};
+
+/*************************************************************************/ /*!
+@Function HASH_Func_Default
+@Description Hash function intended for hashing keys composed of
+ uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey A pointer to the key to hash.
+@Input uHashTabLen The length of the hash table.
+@Return The hash value.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ uintptr_t *p = (uintptr_t *)pKey;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+ IMG_UINT32 ui;
+ IMG_UINT32 uHashKey = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+ uHashPart += (uHashPart << 12);
+ uHashPart ^= (uHashPart >> 22);
+ uHashPart += (uHashPart << 4);
+ uHashPart ^= (uHashPart >> 9);
+ uHashPart += (uHashPart << 10);
+ uHashPart ^= (uHashPart >> 2);
+ uHashPart += (uHashPart << 7);
+ uHashPart ^= (uHashPart >> 12);
+
+ uHashKey += uHashPart;
+ }
+
+ return uHashKey;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Key_Comp_Default
+@Description Compares keys composed of uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey1 Pointer to first hash key to compare.
+@Input pKey2 Pointer to second hash key to compare.
+@Return IMG_TRUE The keys match.
+ IMG_FALSE The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2)
+{
+ uintptr_t *p1 = (uintptr_t *)pKey1;
+ uintptr_t *p2 = (uintptr_t *)pKey2;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+ IMG_UINT32 ui;
+
+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ if (*p1++ != *p2++)
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function _ChainInsert
+@Description Insert a bucket into the appropriate hash table chain.
+@Input pHash The hash table
+@Input pBucket The bucket to insert
+@Input ppBucketTable The hash table bucket array
+@Input uSize The size of the hash table
+@Return None
+*/ /**************************************************************************/
+static void
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+ IMG_UINT32 uIndex;
+
+ /* We assume that all parameters passed by the caller are valid. */
+ PVR_ASSERT (pBucket != NULL);
+ PVR_ASSERT (ppBucketTable != NULL);
+ PVR_ASSERT (uSize != 0);
+
+ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+ pBucket->pNext = ppBucketTable[uIndex];
+ ppBucketTable[uIndex] = pBucket;
+}
+
+/*************************************************************************/ /*!
+@Function _Rehash
+@Description Iterate over every entry in an old hash table and
+ rehash into the new table.
+@Input ppOldTable The old hash table
+@Input uOldSize The size of the old hash table
+@Input ppNewTable The new hash table
+@Input uNewSize The size of the new hash table
+@Return None
+*/ /**************************************************************************/
+static void
+_Rehash (HASH_TABLE *pHash,
+ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex=0; uIndex< uOldSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = ppOldTable[uIndex];
+ while (pBucket != NULL)
+ {
+ BUCKET *pNextBucket = pBucket->pNext;
+ _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+ pBucket = pNextBucket;
+ }
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _Resize
+@Description Attempt to resize a hash table, failure to allocate a
+ new larger hash table is not considered a hard failure.
+ We simply continue and allow the table to fill up, the
+ effect is to allow hash chains to become longer.
+@Input pHash Hash table to resize.
+@Input uNewSize Required table size.
+@Return IMG_TRUE Success
+ IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+ if (uNewSize != pHash->uSize)
+ {
+ BUCKET **ppNewTable;
+ IMG_UINT32 uIndex;
+
+#if defined(__linux__) && defined(__KERNEL__)
+ ppNewTable = OSAllocMemNoStats(sizeof (BUCKET *) * uNewSize);
+#else
+ ppNewTable = OSAllocMem(sizeof (BUCKET *) * uNewSize);
+#endif
+ if (ppNewTable == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ for (uIndex=0; uIndex<uNewSize; uIndex++)
+ ppNewTable[uIndex] = NULL;
+
+ _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
+
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pHash->ppBucketTable);
+#else
+ OSFreeMem(pHash->ppBucketTable);
+#endif
+ /*not nulling pointer, being reassigned just below*/
+ pHash->ppBucketTable = ppNewTable;
+ pHash->uSize = uNewSize;
+ }
+ return IMG_TRUE;
+}
+
+
+/*************************************************************************/ /*!
+@Function HASH_Create_Extended
+@Description Create a self scaling hash table, using the supplied
+ key size, and the supplied hash and key comparison
+ functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the number
+ of entries in the hash table, not its size in
+ bytes.
+@Input uKeySize The size of the key, in bytes.
+@Input pfnHashFunc Pointer to hash function.
+@Input pfnKeyComp Pointer to key comparison function.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+ HASH_TABLE *pHash;
+ IMG_UINT32 uIndex;
+
+ if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Create_Extended: invalid input parameters"));
+ return NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+#if defined(__linux__) && defined(__KERNEL__)
+ pHash = OSAllocMemNoStats(sizeof(HASH_TABLE));
+#else
+ pHash = OSAllocMem(sizeof(HASH_TABLE));
+#endif
+ if (pHash == NULL)
+ {
+ return NULL;
+ }
+
+ pHash->uCount = 0;
+ pHash->uSize = uInitialLen;
+ pHash->uMinimumSize = uInitialLen;
+ pHash->uKeySize = uKeySize;
+ pHash->pfnHashFunc = pfnHashFunc;
+ pHash->pfnKeyComp = pfnKeyComp;
+
+#if defined(__linux__) && defined(__KERNEL__)
+ pHash->ppBucketTable = OSAllocMemNoStats(sizeof (BUCKET *) * pHash->uSize);
+#else
+ pHash->ppBucketTable = OSAllocMem(sizeof (BUCKET *) * pHash->uSize);
+#endif
+ if (pHash->ppBucketTable == NULL)
+ {
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pHash);
+#else
+ OSFreeMem(pHash);
+#endif
+ /*not nulling pointer, out of scope*/
+ return NULL;
+ }
+
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ pHash->ppBucketTable[uIndex] = NULL;
+ return pHash;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Create
+@Description Create a self scaling hash table with a key
+ consisting of a single uintptr_t, and using
+ the default hash and key comparison functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the
+ number of entries in the hash table, not its size
+ in bytes.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+ return HASH_Create_Extended(uInitialLen, sizeof(uintptr_t),
+ &HASH_Func_Default, &HASH_Key_Comp_Default);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Delete
+@Description Delete a hash table created by HASH_Create_Extended or
+ HASH_Create. All entries in the table must have been
+ removed before calling this function.
+@Input pHash Hash table
+@Return None
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete (HASH_TABLE *pHash)
+{
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (psPVRSRVData != NULL)
+ {
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ else
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+#endif
+ if (pHash != NULL)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+ if (bDoCheck)
+ {
+ PVR_ASSERT (pHash->uCount==0);
+ }
+ if (pHash->uCount != 0)
+ {
+ IMG_UINT32 i;
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmemcontext", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount));
+
+ /* Walk every bucket chain and free any entries still present;
+ remaining entries may be spread across all uSize slots, not
+ just the first uCount slots */
+ for (i = 0; i < pHash->uSize; i++)
+ {
+ BUCKET *pBucket = pHash->ppBucketTable[i];
+ while (pBucket != NULL)
+ {
+ BUCKET *pNextBucket = pBucket->pNext;
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pBucket);
+#else
+ OSFreeMem(pBucket);
+#endif
+ pBucket = pNextBucket;
+ }
+ }
+ }
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pHash->ppBucketTable);
+#else
+ OSFreeMem(pHash->ppBucketTable);
+#endif
+ pHash->ppBucketTable = NULL;
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pHash);
+#else
+ OSFreeMem(pHash);
+#endif
+ /*not nulling pointer, copy on stack*/
+ }
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Insert_Extended
+@Description Insert a key value pair into a hash table created
+ with HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to the key.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v)
+{
+ BUCKET *pBucket;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+ return IMG_FALSE;
+ }
+
+#if defined(__linux__) && defined(__KERNEL__)
+ pBucket = OSAllocMemNoStats(sizeof(BUCKET) + pHash->uKeySize);
+#else
+ pBucket = OSAllocMem(sizeof(BUCKET) + pHash->uKeySize);
+#endif
+ if (pBucket == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ pBucket->v = v;
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+ OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+
+ _ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
+
+ pHash->uCount++;
+
+ /* check if we need to think about re-balancing */
+ if (pHash->uCount << 1 > pHash->uSize)
+ {
+ /* Ignore the return code from _Resize because the hash table is
+ still in a valid state and although not ideally sized, it is still
+ functional */
+ _Resize (pHash, pHash->uSize << 1);
+ }
+
+
+ return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Insert
+@Description Insert a key value pair into a hash table created with
+ HASH_Create.
+@Input pHash Hash table
+@Input k The key value.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success.
+ IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
+{
+ return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Remove_Extended
+@Description Remove a key from a hash table created with
+ HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ uintptr_t v = pBucket->v;
+ (*ppBucket) = pBucket->pNext;
+
+#if defined(__linux__) && defined(__KERNEL__)
+ OSFreeMemNoStats(pBucket);
+#else
+ OSFreeMem(pBucket);
+#endif
+ /*not nulling original pointer, already overwritten*/
+
+ pHash->uCount--;
+
+ /* check if we need to think about re-balancing */
+ if (pHash->uSize > (pHash->uCount << 2) &&
+ pHash->uSize > pHash->uMinimumSize)
+ {
+ /* Ignore the return code from _Resize because the
+ hash table is still in a valid state and although
+ not ideally sized, it is still functional */
+ _Resize (pHash,
+ PRIVATE_MAX (pHash->uSize >> 1,
+ pHash->uMinimumSize));
+ }
+
+ return v;
+ }
+ }
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Remove
+@Description Remove a key value pair from a hash table created
+ with HASH_Create.
+@Input pHash Hash table
+@Input k The key
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove (HASH_TABLE *pHash, uintptr_t k)
+{
+ return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve_Extended
+@Description Retrieve a value from a hash table created with
+ HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to the key.
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ uintptr_t v = pBucket->v;
+
+ return v;
+ }
+ }
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve
+@Description Retrieve a value from a hash table created with
+ HASH_Create.
+@Input pHash Hash table
+@Input k The key
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k)
+{
+ return HASH_Retrieve_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Iterate
+@Description Iterate over every entry in the hash table
+@Input pHash - Hash table to iterate
+@Input pfnCallback - Callback to call with the key and data for each
+ entry in the hash table
+@Return Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = pHash->ppBucketTable[uIndex];
+ while (pBucket != NULL)
+ {
+ PVRSRV_ERROR eError;
+ BUCKET *pNextBucket = pBucket->pNext;
+
+ eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), (uintptr_t) pBucket->v);
+
+ /* The callback might want us to break out early */
+ if (eError != PVRSRV_OK)
+ return eError;
+
+ pBucket = pNextBucket;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function HASH_Dump
+@Description To dump the contents of a hash table in human readable
+ form.
+@Input pHash Hash table
+*/ /**************************************************************************/
+void
+HASH_Dump (HASH_TABLE *pHash)
+{
+ IMG_UINT32 uIndex;
+ IMG_UINT32 uMaxLength=0;
+ IMG_UINT32 uEmptyCount=0;
+
+ PVR_ASSERT (pHash != NULL);
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ IMG_UINT32 uLength = 0;
+ if (pHash->ppBucketTable[uIndex] == NULL)
+ {
+ uEmptyCount++;
+ }
+ for (pBucket=pHash->ppBucketTable[uIndex];
+ pBucket != NULL;
+ pBucket = pBucket->pNext)
+ {
+ uLength++;
+ }
+ uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
+ }
+
+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
+ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
+}
+#endif
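+
+#if 0
+/*
+ * Illustrative sketch only (not built): a minimal round trip through the hash
+ * API implemented above, using the default single-uintptr_t key. The function
+ * names _ExampleHashDump and _ExampleHashUsage are hypothetical; the HASH_*
+ * calls are the real ones implemented in this file.
+ */
+static PVRSRV_ERROR _ExampleHashDump(uintptr_t k, uintptr_t v)
+{
+ PVR_DPF((PVR_DBG_MESSAGE, "key=0x%lx value=0x%lx",
+ (unsigned long)k, (unsigned long)v));
+ return PVRSRV_OK;
+}
+
+static void _ExampleHashUsage(void)
+{
+ HASH_TABLE *pHash = HASH_Create(16);
+
+ if (pHash == NULL)
+ {
+ return;
+ }
+
+ /* Insert, look up, iterate over and remove a single key/value pair */
+ if (HASH_Insert(pHash, 0x1234, 0xCAFE))
+ {
+ PVR_ASSERT(HASH_Retrieve(pHash, 0x1234) == 0xCAFE);
+ (void) HASH_Iterate(pHash, _ExampleHashDump);
+ PVR_ASSERT(HASH_Remove(pHash, 0x1234) == 0xCAFE);
+ }
+
+ /* All entries must be removed before the table is deleted */
+ HASH_Delete(pHash);
+}
+#endif /* 0 */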
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Self scaling hash tables
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements simple self scaling hash tables.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* include5/ */
+#include "img_types.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparison function are only guaranteed to
+ * be aligned on an uintptr_t boundary.
+ */
+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+ uintptr_t k,
+ uintptr_t v
+);
+
+/*************************************************************************/ /*!
+@Function HASH_Func_Default
+@Description Hash function intended for hashing keys composed of
+ uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey A pointer to the key to hash.
+@Input uHashTabLen The length of the hash table.
+@Return The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function HASH_Key_Comp_Default
+@Description Compares keys composed of uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey1 Pointer to first hash key to compare.
+@Input pKey2 Pointer to second hash key to compare.
+@Return IMG_TRUE - the keys match.
+ IMG_FALSE - the keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2);
+
+/*************************************************************************/ /*!
+@Function HASH_Create_Extended
+@Description Create a self scaling hash table, using the supplied
+ key size, and the supplied hash and key comparison
+ functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the number
+ of entries in the hash table, not its size in
+ bytes.
+@Input uKeySize The size of the key, in bytes.
+@Input pfnHashFunc Pointer to hash function.
+@Input pfnKeyComp Pointer to key comparison function.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+/*************************************************************************/ /*!
+@Function HASH_Create
+@Description Create a self scaling hash table with a key
+ consisting of a single uintptr_t, and using
+ the default hash and key comparison functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the
+ number of entries in the hash table, not its size
+ in bytes.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+/*************************************************************************/ /*!
+@Function HASH_Delete
+@Description Delete a hash table created by HASH_Create_Extended or
+ HASH_Create. All entries in the table must have been
+ removed before calling this function.
+@Input pHash Hash table
+*/ /**************************************************************************/
+void HASH_Delete (HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function HASH_Insert_Extended
+@Description Insert a key value pair into a hash table created
+ with HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to the key.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function HASH_Insert
+
+@Description Insert a key value pair into a hash table created with
+ HASH_Create.
+@Input pHash The hash table.
+@Input k The key value.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success.
+ IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function HASH_Remove_Extended
+@Description Remove a key from a hash table created with
+ HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated
+ with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function HASH_Remove
+@Description Remove a key value pair from a hash table created
+ with HASH_Create.
+@Input pHash The hash table.
+@Input k The key.
+@Return 0 if the key is missing, or the value associated
+ with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve_Extended
+@Description Retrieve a value from a hash table created with
+ HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated with
+ the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve
+@Description Retrieve a value from a hash table created with
+ HASH_Create.
+@Input pHash The hash table.
+@Input k The key.
+@Return 0 if the key is missing, or the value associated with
+ the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function HASH_Iterate
+@Description Iterate over every entry in the hash table
+@Input pHash Hash table to iterate
+@Input pfnCallback Callback to call with the key and data for
+ each entry in the hash table
+@Return Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function HASH_Dump
+@Description Dump out some information about a hash table.
+@Input pHash The hash table.
+*/ /**************************************************************************/
+void HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _HASH_H_ */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debug driver file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/hardirq.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif_srv5.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+
+/*!
+******************************************************************************
+
+ @Function HostMemSet
+
+ @Description Function that does the same as the C memset() function
+
+ @Modified *pvDest : pointer to start of buffer to be set
+
+ @Input ui8Value: value to set each byte to
+
+ @Input ui32Size : number of bytes to set
+
+ @Return void
+
+******************************************************************************/
+void HostMemSet(void *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+/*!
+******************************************************************************
+
+ @Function HostMemCopy
+
+ @Description Copies ui32Size bytes from the source buffer to the
+ destination buffer, in the same way as the C memcpy() function
+
+ @Output pvDst - pointer to the destination buffer
+ @Input pvSrc - pointer to the source buffer
+ @Input ui32Size - number of bytes to copy
+
+ @Return none
+
+******************************************************************************/
+void HostMemCopy(void *pvDst, void *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+ unsigned char *src,*dst;
+ int i;
+
+ src=(unsigned char *)pvSrc;
+ dst=(unsigned char *)pvDst;
+ for(i=0;i<ui32Size;i++)
+ {
+ dst[i]=src[i];
+ }
+#else
+ memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+ /* XXX Not yet implemented */
+ return 0;
+}
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostPageablePageFree(void * pvBase)
+{
+ vfree(pvBase);
+}
+
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostNonPageablePageFree(void * pvBase)
+{
+ vfree(pvBase);
+}
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void **ppvMdl)
+{
+ /* XXX Not yet implemented */
+ return NULL;
+}
+
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess)
+{
+ /* XXX Not yet implemented */
+}
+
+void HostCreateRegDeclStreams(void)
+{
+ /* XXX Not yet implemented */
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+#define EVENT_WAIT_TIMEOUT_MS 500
+#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
+
+IMG_INT32 HostCreateEventObjects(void)
+{
+ init_waitqueue_head(&sStreamDataEvent);
+
+ return 0;
+}
+
+void HostWaitForEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ /*
+ * More than one process may be woken up.
+ * Any process that wakes up should consume
+ * all the data from the streams.
+ */
+ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+ iStreamData = 0;
+ break;
+ default:
+ /*
+ * For unknown events, enter an interruptible sleep.
+ */
+ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+ break;
+ }
+}
+
+void HostSignalEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ iStreamData = 1;
+ wake_up_interruptible(&sStreamDataEvent);
+ break;
+ default:
+ break;
+ }
+}
+
+void HostDestroyEventObjects(void)
+{
+}
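+
+#if 0
+/*
+ * Illustrative sketch only (not built): the intended producer/consumer pairing
+ * for the event object helpers above. A writer signals DBG_EVENT_STREAM_DATA
+ * once data has been committed to a stream; a reader blocks (with timeout) in
+ * HostWaitForEvent before draining the streams. The function names below are
+ * hypothetical.
+ */
+static void _ExampleStreamWriter(void)
+{
+ /* ... commit data to a debug stream ... */
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+}
+
+static void _ExampleStreamReader(void)
+{
+ /* Returns either when data is signalled or after EVENT_WAIT_TIMEOUT_MS */
+ HostWaitForEvent(DBG_EVENT_STREAM_DATA);
+ /* ... drain all available stream data ... */
+}
+#endif /* 0 */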
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+/*****************************************************************************
+ Defines
+*****************************************************************************/
+#define HOST_PAGESIZE (4096)
+#define DBG_MEMORY_INITIALIZER (0xe2)
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostPageablePageFree(void * pvBase);
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostNonPageablePageFree(void * pvBase);
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void * *ppvMdl);
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess);
+
+void HostCreateRegDeclStreams(void);
+
+/* Direct macros for Linux to avoid LockDep false-positives from occurring */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#define HostCreateMutex(void) ({ \
+ struct mutex* pMutex = NULL; \
+ pMutex = kmalloc(sizeof(struct mutex), GFP_KERNEL); \
+ if (pMutex) { mutex_init(pMutex); }; \
+ pMutex;})
+#define HostDestroyMutex(hLock) ({mutex_destroy((hLock)); kfree((hLock)); PVRSRV_OK;})
+
+#define HostAquireMutex(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define HostReleaseMutex(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+void * HostCreateMutex(void);
+void HostAquireMutex(void * pvMutex);
+void HostReleaseMutex(void * pvMutex);
+void HostDestroyMutex(void * pvMutex);
+
+#endif
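+
+#if 0
+/*
+ * Illustrative sketch only (not built): typical lifetime of a debug-driver
+ * mutex using the helpers above. On Linux kernel builds the macro variants
+ * expand directly to mutex_init/mutex_lock/mutex_unlock; on other platforms
+ * the function prototypes are used instead. The function name is hypothetical.
+ */
+static void _ExampleMutexUsage(void)
+{
+ void *hLock = HostCreateMutex();
+
+ if (hLock == NULL)
+ {
+ return;
+ }
+
+ HostAquireMutex(hLock);
+ /* ... access data shared with other debug-driver contexts ... */
+ HostReleaseMutex(hLock);
+
+ HostDestroyMutex(hLock);
+}
+#endif /* 0 */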
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(void);
+void HostWaitForEvent(DBG_EVENT eEvent);
+void HostSignalEvent(DBG_EVENT eEvent);
+void HostDestroyEventObjects(void);
+#endif /*defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#endif
+
+/*****************************************************************************
+ End of file (HOSTFUNC.H)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File htbserver.c
+@Title Host Trace Buffer server implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provides the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "pvrsrv_tlcommon.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_apphint.h"
+
+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */
+#define HTB_MAX_NUM_PID 8
+
+/* number of times to try rewriting a log entry */
+#define HTB_LOG_RETRY_COUNT 5
+
+/*************************************************************************/ /*!
+ Host Trace Buffer control information structure
+*/ /**************************************************************************/
+typedef struct
+{
+ IMG_CHAR *pszBufferName; /*!< Name to use for the trace buffer,
+ this will be required to request
+ trace data from TL.
+ Once set this may not be changed */
+
+ IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes
+ Once set this may not be changed */
+
+ HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if
+ the buffer is full.
+ Once set this may not be changed */
+
+/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be
+ logged */
+
+ IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */
+
+ IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
+ a specific set of processes */
+
+ IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */
+
+ IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */
+
+ HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */
+
+ IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has
+ been signalled as dropped */
+
+ /* synchronisation parameters */
+ IMG_UINT64 ui64SyncOSTS;
+ IMG_UINT64 ui64SyncCRTS;
+ IMG_UINT32 ui32SyncCalcClkSpd;
+ IMG_UINT32 ui32SyncMarker;
+
+} HTB_CTRL_INFO;
+
+
+/*************************************************************************/ /*!
+ Mapping from HTB operation mode to the TLStream flags applied when the
+ underlying Transport Layer stream is created or reconfigured
+*/ /**************************************************************************/
+static const IMG_UINT32 MapFlags[] =
+{
+ 0, /* HTB_OPMODE_UNDEF = 0 */
+ TL_FLAG_RESERVE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
+ 0, /* HTB_OPMODE_DROPOLDEST */
+ TL_FLAG_RESERVE_BLOCK /* HTB_OPMODE_BLOCK */
+};
+
+static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF");
+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
+static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK");
+
+static const IMG_UINT32 g_ui32TLBaseFlags = 0; /* TL_FLAG_NO_SIGNAL_ON_COMMIT */
+
+/* Minimum TL buffer size,
+ * large enough for around 60 worst case messages or 200 average messages
+ */
+#define HTB_TL_BUFFER_SIZE_MIN (0x10000)
+
+
+static HTB_CTRL_INFO g_sCtrl = {0};
+static IMG_BOOL g_bConfigured = IMG_FALSE;
+static IMG_HANDLE g_hTLStream = NULL;
+
+
+/************************************************************************/ /*!
+ @Function _LookupFlags
+ @Description Convert HTBuffer Operation mode to TLStream flags
+
+ @Input eMode HTBuffer operation mode
+
+ @Return IMG_UINT32 TLStream flags
+*/ /**************************************************************************/
+static IMG_UINT32
+_LookupFlags( HTB_OPMODE_CTRL eMode )
+{
+ return (eMode < (sizeof(MapFlags)/sizeof(MapFlags[0])))? MapFlags[eMode]: 0;
+}
+
+
+/************************************************************************/ /*!
+ @Function _HTBLogDebugInfo
+ @Description Debug dump handler used to dump the state of the HTB module.
+ Called for each verbosity level during a debug dump. Function
+ only prints state when called for High verbosity.
+
+ @Input hDebugRequestHandle See PFN_DBGREQ_NOTIFY
+
+ @Input ui32VerbLevel See PFN_DBGREQ_NOTIFY
+
+ @Input pfnDumpDebugPrintf See PFN_DBGREQ_NOTIFY
+
+ @Input pvDumpDebugFile See PFN_DBGREQ_NOTIFY
+
+*/ /**************************************************************************/
+static void _HTBLogDebugInfo(
+ PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile
+)
+{
+ PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+
+ if (g_bConfigured)
+ {
+ IMG_INT i;
+
+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
+
+ PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
+ PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
+ PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
+
+ for (i=0; i < HTB_FLAG_NUM_EL; i++)
+ {
+ PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
+ }
+ }
+}
+
+/************************************************************************/ /*!
+ @Function HTBDeviceCreate
+ @Description Initialisation actions for HTB at device creation.
+
+ @Input psDeviceNode Reference to the device node in context
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify,
+ psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ return eError;
+}
+
+/************************************************************************/ /*!
+ @Function HTBDeviceDestroy
+ @Description De-initialisation actions for HTB at device destruction.
+
+ @Input psDeviceNode Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+ if (psDeviceNode->hHtbDbgReqNotify)
+ {
+ /* Not much we can do if it fails; the driver is unloading */
+ (void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify);
+ psDeviceNode->hHtbDbgReqNotify = NULL;
+ }
+}
+
+
+/************************************************************************/ /*!
+ @Function HTBDeInit
+ @Description Close the Host Trace Buffer and free all resources. Must
+ perform a no-op if already de-initialised.
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void )
+{
+ if (g_hTLStream)
+ {
+ TLStreamClose( g_hTLStream );
+ g_hTLStream = NULL;
+ }
+
+ if (g_sCtrl.pszBufferName)
+ {
+ OSFreeMem( g_sCtrl.pszBufferName );
+ g_sCtrl.pszBufferName = NULL;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ AppHint interface functions
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return HTBControlKM(1, &ui32Value, 0, 0,
+ HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF);
+}
+
+static
+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ *pui32Value = g_auiHTBGroupEnable[0];
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value);
+}
+
+static
+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode;
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ @Function HTBConfigureKM
+ @Description Configure or update the configuration of the Host Trace Buffer
+
+ @Input ui32NameSize Size of the pszName string
+
+ @Input pszName Name to use for the underlying data buffer
+
+ @Input ui32BufferSize Size of the underlying data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR * pszName,
+ const IMG_UINT32 ui32BufferSize
+)
+{
+ if ( !g_sCtrl.pszBufferName )
+ {
+ g_sCtrl.ui32BufferSize = (ui32BufferSize < HTB_TL_BUFFER_SIZE_MIN)? HTB_TL_BUFFER_SIZE_MIN: ui32BufferSize;
+ ui32NameSize = (ui32NameSize > PRVSRVTL_MAX_STREAM_NAME_SIZE)? PRVSRVTL_MAX_STREAM_NAME_SIZE: ui32NameSize;
+ g_sCtrl.pszBufferName = OSAllocMem(ui32NameSize * sizeof(IMG_CHAR));
+ if (NULL == g_sCtrl.pszBufferName)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSStringNCopy(g_sCtrl.pszBufferName, pszName, ui32NameSize);
+ g_sCtrl.pszBufferName[ui32NameSize-1] = 0;
+
+ /* initialise rest of state */
+ g_sCtrl.eOpMode = HTB_OPMODE_DROPLATEST;
+ g_sCtrl.ui32LogLevel = 0;
+ g_sCtrl.ui32PIDCount = 0;
+ g_sCtrl.ui32PIDHead = 0;
+ g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID;
+ g_sCtrl.bLogDropSignalled = IMG_FALSE;
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup,
+ _HTBReadLogGroup,
+ _HTBSetLogGroup,
+ NULL,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode,
+ _HTBReadOpMode,
+ _HTBSetOpMode,
+ NULL,
+ NULL);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HTBConfigureKM: Reconfiguration is not supported\n"));
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static void
+_OnTLReaderOpenCallback( void *pvArg )
+{
+ if ( g_hTLStream )
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Time = OSClockus();
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
+ ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)),
+ ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ }
+
+ PVR_UNREFERENCED_PARAMETER(pvArg);
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBControlKM
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode Enable logging for all or specific processes
+
+ @Input eOpMode Control the behaviour of the data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+ const IMG_UINT32 ui32NumFlagGroups,
+ const IMG_UINT32 * aui32GroupEnable,
+ const IMG_UINT32 ui32LogLevel,
+ const IMG_UINT32 ui32EnablePID,
+ const HTB_LOGMODE_CTRL eLogMode,
+ const HTB_OPMODE_CTRL eOpMode
+)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Time = OSClockus();
+
+ if ( !g_bConfigured && g_sCtrl.pszBufferName && ui32NumFlagGroups )
+ {
+ eError = TLStreamCreate(
+ &g_hTLStream,
+ g_sCtrl.pszBufferName,
+ g_sCtrl.ui32BufferSize,
+ _LookupFlags(HTB_OPMODE_DROPLATEST) | g_ui32TLBaseFlags,
+ _OnTLReaderOpenCallback, NULL, NULL, NULL );
+ PVR_LOGR_IF_ERROR( eError, "TLStreamCreate");
+ g_bConfigured = IMG_TRUE;
+ }
+
+ if ( HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
+ {
+ g_sCtrl.eOpMode = eOpMode;
+ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+ {
+ OSReleaseThreadQuanta();
+ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+ }
+ PVR_LOGR_IF_ERROR( eError, "TLStreamReconfigure");
+ }
+
+ if ( ui32EnablePID )
+ {
+ g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
+ g_sCtrl.ui32PIDHead++;
+ g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
+ g_sCtrl.ui32PIDCount++;
+ if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
+ {
+ g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
+ }
+ }
+
+ /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
+ if ( HTB_LOGMODE_ALLPID == eLogMode )
+ {
+ OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
+ g_sCtrl.ui32PIDCount = 0;
+ g_sCtrl.ui32PIDHead = 0;
+ }
+ if ( HTB_LOGMODE_UNDEF != eLogMode )
+ {
+ g_sCtrl.eLogMode = eLogMode;
+ }
+
+ if ( ui32NumFlagGroups )
+ {
+ for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
+ {
+ g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
+ }
+ for (; i < HTB_FLAG_NUM_EL; i++)
+ {
+ g_auiHTBGroupEnable[i] = 0;
+ }
+ }
+
+ if ( ui32LogLevel )
+ {
+ g_sCtrl.ui32LogLevel = ui32LogLevel;
+ }
+
+ /* Dump the current configuration state */
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+ {
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ }
+
+ if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
+ {
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK_RPT,
+ g_sCtrl.ui32SyncMarker);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE_RPT,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ PVR_LOG_IF_ERROR( eError, "HTBLog");
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+ Return IMG_TRUE if logging is currently enabled for the given PID
+*/ /**************************************************************************/
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+ {
+ if ( g_sCtrl.aui32EnablePID[i] == PID )
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncPartitionMarker
+ @Description Write an HTB sync partition marker to the HTB log
+
+ @Input ui32Marker Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+ const IMG_UINT32 ui32Marker
+)
+{
+ g_sCtrl.ui32SyncMarker = ui32Marker;
+ if ( g_hTLStream )
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Time = OSClockus();
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK, ui32Marker);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ if (0 != g_sCtrl.ui32SyncCalcClkSpd)
+ {
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ }
+ }
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncScale
+ @Description Write FW-Host synchronisation data to the HTB log when clocks
+ change or are re-calibrated
+
+ @Input bLogValues IMG_TRUE if the values should be immediately
+ written out to the log
+
+ @Input ui64OSTS OS Timestamp
+
+ @Input ui64CRTS Rogue timestamp
+
+ @Input ui32CalcClkSpd Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+ const IMG_BOOL bLogValues,
+ const IMG_UINT64 ui64OSTS,
+ const IMG_UINT64 ui64CRTS,
+ const IMG_UINT32 ui32CalcClkSpd
+)
+{
+ g_sCtrl.ui64SyncOSTS = ui64OSTS;
+ g_sCtrl.ui64SyncCRTS = ui64CRTS;
+ g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
+ if ( g_hTLStream && bLogValues)
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Time = OSClockus();
+ eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
+ ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
+ ui32CalcClkSpd);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ }
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogKM
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStamp The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ui32NumArgs Number of entries in aui32Args
+
+ @Input aui32Args Array of log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+ IMG_UINT32 PID,
+ IMG_UINT32 ui32TimeStamp,
+ HTB_LOG_SFids SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 * aui32Args
+)
+{
+ /* format of messages is: SF:PID:TIME:[PARn]*
+ * 32-bit timestamp (us) gives about 1h before looping
+ * Buffer allocated on the stack so don't need a semaphore to guard it
+ */
+ IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+
+ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+ IMG_UINT32 * pui32Message = aui32MessageBuffer;
+ IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+ if ( g_hTLStream
+ && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+ )
+ {
+ *pui32Message++ = SF;
+ *pui32Message++ = PID;
+ *pui32Message++ = ui32TimeStamp;
+ while ( ui32NumArgs )
+ {
+ ui32NumArgs--;
+ pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
+ }
+
+ eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+ {
+ OSReleaseThreadQuanta();
+ eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+ }
+
+ if ( PVRSRV_OK == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_FALSE;
+ }
+ else if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG != eError || !g_sCtrl.bLogDropSignalled )
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_TRUE;
+ }
+ }
+
+ return eError;
+}
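+
+/*
+ * Worked example (illustrative only): an HTB_SF_CTRL_FWSYNC_MARK event
+ * carries one parameter, so HTBLogKM() packs four 32-bit words (SF, PID,
+ * timestamp and the marker value) and writes
+ * 4 * (HTB_LOG_HEADER_SIZE + 1) == 16 bytes to the TL stream.
+ */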
+
+/* EOF */
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbserver.h
+@Title Host Trace Buffer server implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+
+ A Host Trace can be merged with a corresponding Firmware Trace.
+ This is achieved by inserting synchronisation data into both
+ traces and post processing to merge them.
+
+ The FW Trace will contain a "Sync Partition Marker". This is
+ updated every time the RGX is brought out of reset (RGX clock
+ timestamps reset at this point) and is repeated when the FW
+ Trace buffer wraps to ensure there is always at least 1
+ partition marker in the Firmware Trace buffer whenever it is
+ read.
+
+ The Host Trace will contain corresponding "Sync Partition
+ Markers" - #HTBSyncPartitionMarker(). Each partition is then
+ subdivided into "Sync Scale" sections - #HTBSyncScale(). The
+ "Sync Scale" data allows the timestamps from the two traces to
+ be correlated. The "Sync Scale" data is updated as part of the
+ standard RGX time correlation code (rgxtimecorr.c) and is
+ updated periodically including on power and clock changes.
+
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __HTBSERVER_H__
+#define __HTBSERVER_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+
+/************************************************************************/ /*!
+ @Function HTBDeviceCreate
+ @Description Initialisation actions for HTB at device creation.
+
+ @Input psDeviceNode Reference to the device node in context
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function HTBDeviceDestroy
+ @Description De-initialisation actions for HTB at device destruction.
+
+ @Input psDeviceNode Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function HTBDeInit
+ @Description Close the Host Trace Buffer and free all resources
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void );
+
+
+/*************************************************************************/ /*!
+ @Function HTBConfigureKM
+ @Description Configure or update the configuration of the Host Trace Buffer
+
+ @Input ui32NameSize Size of the pszName string
+
+ @Input pszName Name to use for the underlying data buffer
+
+ @Input ui32BufferSize Size of the underlying data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR * pszName,
+ const IMG_UINT32 ui32BufferSize
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBControlKM
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode Enable logging for all or specific processes
+
+ @Input eOpMode Control the behaviour of the data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+ const IMG_UINT32 ui32NumFlagGroups,
+ const IMG_UINT32 * aui32GroupEnable,
+ const IMG_UINT32 ui32LogLevel,
+ const IMG_UINT32 ui32EnablePID,
+ const HTB_LOGMODE_CTRL eLogMode,
+ const HTB_OPMODE_CTRL eOpMode
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncPartitionMarker
+ @Description Write an HTB sync partition marker to the HTB log
+
+ @Input ui32Marker Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+ const IMG_UINT32 ui32Marker
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncScale
+ @Description Write FW-Host synchronisation data to the HTB log when clocks
+ change or are re-calibrated
+
+ @Input bLogValues IMG_TRUE if the values should be immediately
+ written out to the log
+
+ @Input ui64OSTS OS Timestamp
+
+ @Input ui64CRTS Rogue timestamp
+
+ @Input ui32CalcClkSpd Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+ const IMG_BOOL bLogValues,
+ const IMG_UINT64 ui64OSTS,
+ const IMG_UINT64 ui64CRTS,
+ const IMG_UINT32 ui32CalcClkSpd
+);
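+
+/*
+ * Illustrative sketch only (not part of the driver build): the two functions
+ * above are expected to be driven by the RGX time correlation code roughly
+ * as follows. The function and argument names are assumptions made for the
+ * example.
+ */
+#if 0
+static void ExampleResyncHostTrace(IMG_UINT32 ui32ResetCount,
+ IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT64 ui64CRTimestamp,
+ IMG_UINT32 ui32CalcClkSpd)
+{
+ /* Start a new partition each time the GPU comes out of reset */
+ HTBSyncPartitionMarker(ui32ResetCount);
+
+ /* Record the OS/CR timestamp pair and calculated clock speed so the
+ * post-processor can scale CR ticks to OS time; IMG_TRUE writes the
+ * scale packet to the log immediately */
+ HTBSyncScale(IMG_TRUE, ui64OSTimestamp, ui64CRTimestamp, ui32CalcClkSpd);
+}
+#endif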
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogKM
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStamp The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ui32NumArgs Number of entries in aui32Args
+
+ @Input aui32Args Array of log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+ IMG_UINT32 PID,
+ IMG_UINT32 ui32TimeStamp,
+ HTB_LOG_SFids SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 * aui32Args
+);
+
+
+#endif /* __HTBSERVER_H__ */
+
+/* EOF */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbuffer.c
+@Title Host Trace Buffer shared API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "htbuffer.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+
+/* the group flags array of ints large enough to store all the group flags
+ * NB: This will only work while all logging is in the kernel
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+/*************************************************************************/ /*!
+ @Function HTBConfigure
+ @Description Configure the Host Trace Buffer.
+ Once these parameters are set they may not be changed
+
+ @Input hSrvHandle Server Handle
+
+ @Input pszBufferName Name to use for the TL buffer; this will be
+ required to request trace data from the TL
+
+ @Input ui32BufferSize Requested TL buffer size in bytes
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+ IMG_HANDLE hSrvHandle,
+ IMG_CHAR * pszBufferName,
+ IMG_UINT32 ui32BufferSize
+)
+{
+ return BridgeHTBConfigure(
+ hSrvHandle,
+ (OSStringLength(pszBufferName)+1),
+ pszBufferName,
+ ui32BufferSize
+ );
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input hSrvHandle Server Handle
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogPidMode Enable logging for all or specific processes
+
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogPidMode,
+ HTB_OPMODE_CTRL eOpMode
+)
+{
+ return BridgeHTBControl(
+ hSrvHandle,
+ ui32NumFlagGroups,
+ aui32GroupEnable,
+ ui32LogLevel,
+ ui32EnablePID,
+ eLogPidMode,
+ eOpMode
+ );
+}
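+
+/*
+ * Illustrative sketch only (not compiled): a client would typically
+ * configure the buffer once and then enable a set of log groups. The
+ * handle value, buffer name and size below are assumptions made for the
+ * example.
+ */
+#if 0
+ IMG_HANDLE hSrvHandle = NULL; /* assumed: a valid services connection */
+ IMG_UINT32 aui32Groups[] = { HTB_LOG_TYPE_CTRL | HTB_LOG_TYPE_MAIN };
+ PVRSRV_ERROR eError;
+
+ eError = HTBConfigure(hSrvHandle, "PVRHTBuffer", 0x10000);
+ if (PVRSRV_OK == eError)
+ {
+ eError = HTBControl(hSrvHandle, 1, aui32Groups, 0, 0,
+ HTB_LOGMODE_ALLPID, HTB_OPMODE_DROPLATEST);
+ }
+#endif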
+
+
+/*************************************************************************/ /*!
+ Common helper: unpack the va_list into a fixed array and send the log
+ message over the bridge (kernel builds only)
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, HTB_LOG_SFids SF, va_list args)
+{
+#if defined(__KERNEL__)
+ IMG_UINT32 i;
+ IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
+
+ PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+ ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS)? HTB_LOG_MAX_PARAMS: ui32NumArgs;
+
+ /* unpack var args before sending over bridge */
+ for (i=0; i<ui32NumArgs; i++)
+ {
+ aui32Args[i] = va_arg(args, IMG_UINT32);
+ }
+
+ return BridgeHTBLog(hSrvHandle, PID, ui32TimeStampus, SF, ui32NumArgs, aui32Args);
+#else
+ PVR_UNREFERENCED_PARAMETER(hSrvHandle);
+ PVR_UNREFERENCED_PARAMETER(PID);
+ PVR_UNREFERENCED_PARAMETER(ui32TimeStampus);
+ PVR_UNREFERENCED_PARAMETER(SF);
+ PVR_UNREFERENCED_PARAMETER(args);
+
+ PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBLog
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStampus The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...)
+{
+ PVRSRV_ERROR eError;
+ va_list args;
+ va_start(args, SF);
+ eError =_HTBLog(hSrvHandle, PID, ui32TimeStampus, SF, args);
+ va_end(args);
+ return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
+{
+ PVRSRV_ERROR eError;
+ va_list args;
+ va_start(args, SF);
+ eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSClockus(), SF, args);
+ va_end(args);
+ return eError;
+}
+
+
+/* EOF */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbuffer.h
+@Title Host Trace Buffer shared API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_H__
+#define __HTBUFFER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "htbuffer_sf.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#if defined(__KERNEL__)
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(0, SF, ## args); } while (0)
+#else
+#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
+#endif
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff))
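+
+/*
+ * Illustrative sketch only (not compiled): a kernel-side caller logging a
+ * 64-bit value splits it with the macros above, e.g. for the two-parameter
+ * MMU unmap event. ui64DevVAddr is an assumption made for the example.
+ */
+#if 0
+ IMG_UINT64 ui64DevVAddr = 0; /* assumed: the device virtual address */
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+ HTBLOG_U64_BITS_HIGH(ui64DevVAddr),
+ HTBLOG_U64_BITS_LOW(ui64DevVAddr));
+#endif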
+
+/*************************************************************************/ /*!
+ @Function HTBLog
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStampus The timestamp in us for this event
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...);
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
+
+
+
+/* DEBUG log group enable */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG __BUILDERROR__
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_H__ */
+/*****************************************************************************
+ End of file (htbuffer.h)
+*****************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbuffer_init.h
+@Title Host Trace Buffer functions needed for Services initialisation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_INIT_H__
+#define __HTBUFFER_INIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+
+/*************************************************************************/ /*!
+ @Function HTBConfigure
+ @Description Configure the Host Trace Buffer.
+ Once these parameters are set they may not be changed
+
+ @Input hSrvHandle Server Handle
+
+ @Input pszBufferName Name to use for the TL buffer; this will be
+ required to request trace data from the TL
+
+ @Input ui32BufferSize Requested TL buffer size in bytes
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+ IMG_HANDLE hSrvHandle,
+ IMG_CHAR * pszBufferName,
+ IMG_UINT32 ui32BufferSize
+);
+
+/*************************************************************************/ /*!
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input hSrvHandle Server Handle
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode Enable logging for all or specific processes
+
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogMode,
+ HTB_OPMODE_CTRL eOpMode
+);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_INIT_H__ */
+/*****************************************************************************
+ End of file (htbuffer_init.h)
+*****************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbuffer_sf.h
+@Title Host Trace Buffer interface string format specifiers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the Host Trace Buffer logging messages. The following
+ list contains the messages the host driver prints. Changing anything
+ but the first column or spelling mistakes in the strings will
+ break compatibility with log files created with older/newer
+ driver versions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_SF_H__
+#define __HTBUFFER_SF_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ * WILL BREAK host tracing message compatibility with previous
+ * driver versions. Only add new ones, if so required.
+ ****************************************************************************/
+
+
+/* String used in pvrdebug -h output */
+#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg"
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s"
+
+/* Available log groups - Master template
+ *
+ * Group usage is as follows:
+ * CTRL - Internal Host Trace information and synchronisation data
+ * MMU - MMU page mapping information
+ * SYNC - Synchronisation debug
+ * MAIN - Data master kicks, etc. tying in with the MAIN group in FWTrace
+ * BRG - Bridge call logging
+ * DBG - Temporary debugging group, logs not to be left in the driver
+ *
+ */
+#define HTB_LOG_SFGROUPLIST \
+ X( HTB_GROUP_NONE, NONE ) \
+/* gid, group flag / apphint name */ \
+ X( HTB_GROUP_CTRL, CTRL ) \
+ X( HTB_GROUP_MMU, MMU ) \
+ X( HTB_GROUP_SYNC, SYNC ) \
+ X( HTB_GROUP_MAIN, MAIN ) \
+ X( HTB_GROUP_BRG, BRG ) \
+/* Debug group HTB_GROUP_DBG must always be last */ \
+ X( HTB_GROUP_DBG, DBG )
+
+
+/* Table of String Format specifiers, the group they belong and the number of
+ * arguments each expects. Xmacro styled macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id : unique id within a group
+ * gid : group id as defined above
+ * sym name : symbolic name of enumerations used to identify message strings
+ * string : Actual string
+ * #args : number of arguments the string format requires
+ */
+#define HTB_LOG_SFIDLIST \
+/*id, gid, sym name, string, # arguments */ \
+X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \
+\
+X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \
+X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \
+X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \
+X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \
+X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \
+X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \
+X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \
+\
+X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \
+X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \
+\
+X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \
+X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \
+X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \
+X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \
+X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \
+X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \
+\
+X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08X @ %d\n", 2) \
+X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08X @ %d\n", 2) \
+X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08X @ %d\n", 2) \
+X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \
+X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \
+X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08X @ %d\n", 2) \
+X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \
+X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \
+\
+X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \
+X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \
+\
+X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \
+\
+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15)
+
+
+
+/* gid - Group numbers */
+typedef enum _HTB_LOG_SFGROUPS {
+#define X(A,B) A,
+ HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_SFGROUPS;
+
+
+/* group flags are stored in an array of elements */
+/* each of which have a certain number of bits */
+#define HTB_FLAG_EL_T IMG_UINT32
+#define HTB_FLAG_NUM_BITS_IN_EL ( sizeof(HTB_FLAG_EL_T) * 8 )
+
+#define HTB_LOG_GROUP_FLAG_GROUP(gid) ( (gid-1) / HTB_FLAG_NUM_BITS_IN_EL )
+#define HTB_LOG_GROUP_FLAG(gid) (gid? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)): 0)
+#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid
+
+/* group enable flags */
+typedef enum _HTB_LOG_TYPE {
+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a),
+ HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_TYPE;
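+
+/*
+ * Worked example (illustrative only): HTB_GROUP_MMU has gid 2, so
+ * HTB_LOG_GROUP_FLAG(HTB_GROUP_MMU) is (0x1 << 1) == 0x2 and
+ * HTB_LOG_GROUP_FLAG_GROUP(HTB_GROUP_MMU) is 0, i.e. HTB_LOG_TYPE_MMU is
+ * bit 1 of the first element of the group enable flag array.
+ */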
+
+
+
+/* The symbolic names found in the table above are assigned an ui32 value of
+ * the following format:
+ * bits: 31 | 30..28 | 27..20 | 19..16 | 15..12 | 11..0
+ * -----------------------------------------------------
+ * 0-11: id number
+ * 12-15: group id number
+ * 16-19: number of parameters
+ * 20-27: unused
+ * 28-30: active: identify SF packet, otherwise regular int32
+ * 31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum generated SF ids list.
+ */
+#define HTB_LOG_IDMARKER (0x70000000)
+#define HTB_LOG_CREATESFID(a,b,e) (((a) | ((b)<<12) | ((e)<<16)) | HTB_LOG_IDMARKER)
+
+#define HTB_LOG_IDMASK (0xFFF00000)
+#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER )
+
+typedef enum HTB_LOG_SFids {
+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e),
+ HTB_LOG_SFIDLIST
+#undef X
+} HTB_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define HTB_SF_GID(x) (((x)>>12) & 0xf)
+/* future improvement to support log levels */
+#define HTB_SF_LVL(x) (0)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
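+
+/*
+ * Worked example (illustrative only): HTB_SF_CTRL_ENABLE_PID is generated
+ * from the X(2, HTB_GROUP_CTRL, ...) entry with one parameter, i.e.
+ * HTB_LOG_CREATESFID(2, 1, 1):
+ *
+ * (2 | (1 << 12) | (1 << 16)) | HTB_LOG_IDMARKER == 0x70011002
+ *
+ * so HTB_SF_GID() recovers HTB_GROUP_CTRL, HTB_SF_PARAMNUM() recovers 1 and
+ * HTB_LOG_VALIDID() holds for the value.
+ */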
+
+/* format of messages is: SF:PID:TIME:[PARn]*
+ */
+#define HTB_LOG_HEADER_SIZE 3
+#define HTB_LOG_MAX_PARAMS 15
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_SF_H__ */
+/*****************************************************************************
+ End of file (htbuffer_sf.h)
+*****************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File htbuffer_types.h
+@Title Host Trace Buffer types.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+ The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_TYPES_H__
+#define __HTBUFFER_TYPES_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "htbuffer_sf.h"
+
+/* the group flags array of ints large enough to store all the group flags */
+#define HTB_FLAG_NUM_EL ( ((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1 )
+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL];
+
+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF)))
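+
+/*
+ * Worked example (illustrative only): for an MMU event such as
+ * HTB_SF_MMU_PAGE_OP_MAP, HTB_SF_GID() yields HTB_GROUP_MMU, so
+ * HTB_GROUP_ENABLED() tests the HTB_LOG_TYPE_MMU bit (0x2) in
+ * g_auiHTBGroupEnable[0].
+ */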
+
+/*************************************************************************/ /*!
+ Host Trace Buffer operation mode
+ Care must be taken if changing this enum to ensure the MapFlags[] array
+ in htbserver.c is kept in-step.
+*/ /**************************************************************************/
+typedef enum
+{
+ /*! Undefined operation mode */
+ HTB_OPMODE_UNDEF = 0,
+
+ /*! Drop latest, intended for continuous logging to a UM daemon.
+ * If the daemon does not keep up, the most recent log data
+ * will be dropped
+ */
+ HTB_OPMODE_DROPLATEST,
+
+ /*! Drop oldest, intended for crash logging.
+ * Data will be continuously written to a circular buffer.
+ * After a crash the buffer will contain events leading up to the crash
+ */
+ HTB_OPMODE_DROPOLDEST,
+
+ /*! Block write if buffer is full
+ */
+ HTB_OPMODE_BLOCK,
+
+ HTB_OPMODE_LAST = HTB_OPMODE_BLOCK
+} HTB_OPMODE_CTRL;
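+
+/*
+ * Illustrative sketch only (not part of the original header): how a writer
+ * might act on the operation mode when the transport buffer is full. The
+ * helper name is hypothetical; it returns whether the write should proceed.
+ *
+ *   static IMG_BOOL ExampleHandleFullBuffer(HTB_OPMODE_CTRL eMode)
+ *   {
+ *       switch (eMode)
+ *       {
+ *           case HTB_OPMODE_DROPLATEST:
+ *               return IMG_FALSE;  // discard the newest message
+ *           case HTB_OPMODE_DROPOLDEST:
+ *               return IMG_TRUE;   // overwrite the oldest data and continue
+ *           case HTB_OPMODE_BLOCK:
+ *               return IMG_TRUE;   // caller waits until space is drained
+ *           default:
+ *               return IMG_FALSE;  // HTB_OPMODE_UNDEF: nothing is logged
+ *       }
+ *   }
+ */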
+
+
+/*************************************************************************/ /*!
+ Host Trace Buffer log mode control
+*/ /**************************************************************************/
+typedef enum
+{
+ /*! Undefined log mode, used if update is not applied */
+ HTB_LOGMODE_UNDEF = 0,
+
+ /*! Log trace messages for all PIDs.
+ */
+ HTB_LOGMODE_ALLPID,
+
+ /*! Log trace messages for specific PIDs only.
+ */
+ HTB_LOGMODE_RESTRICTEDPID,
+
+ HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID
+} HTB_LOGMODE_CTRL;
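+
+/*
+ * Illustrative sketch only (not part of the original header): the log mode
+ * decides whether a per-PID filter is consulted at all. The filter array and
+ * the helper name are hypothetical.
+ *
+ *   static IMG_BOOL ExamplePIDPermitted(HTB_LOGMODE_CTRL eMode, IMG_UINT32 ui32PID,
+ *                                       const IMG_UINT32 *pui32PIDs, IMG_UINT32 ui32NumPIDs)
+ *   {
+ *       IMG_UINT32 i;
+ *
+ *       if (eMode != HTB_LOGMODE_RESTRICTEDPID)
+ *           return IMG_TRUE;          // HTB_LOGMODE_ALLPID: trace every process
+ *
+ *       for (i = 0; i < ui32NumPIDs; i++)
+ *           if (pui32PIDs[i] == ui32PID)
+ *               return IMG_TRUE;      // PID is on the restricted list
+ *
+ *       return IMG_FALSE;
+ *   }
+ */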
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_TYPES_H__ */
+
+/*****************************************************************************
+ End of file (htbuffer_types.h)
+*****************************************************************************/
+
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.2.0
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_0_H_
+#define _RGXCONFIG_KM_1_V_2_0_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 0
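+
+/*
+ * Illustrative note (not part of the generated file): the values above are the
+ * fixed B, N and C parts of the BVNC "1.V.2.0" named in the title; the V field
+ * is left unspecified in these kernel-mode headers. A full BVNC string could,
+ * for example, be assembled as below, where iVersion is a hypothetical stand-in
+ * for the V value obtained at runtime:
+ *
+ *   snprintf(acBVNC, sizeof(acBVNC), "%d.%d.%d.%d",
+ *            RGX_BNC_KM_B, iVersion, RGX_BNC_KM_N, RGX_BNC_KM_C);
+ */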
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_2_0_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.2.20
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_20_H_
+#define _RGXCONFIG_KM_1_V_2_20_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_2_20_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.2.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_30_H_
+#define _RGXCONFIG_KM_1_V_2_30_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_2_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.2.5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_5_H_
+#define _RGXCONFIG_KM_1_V_2_5_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_2_5_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.4.12
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_12_H_
+#define _RGXCONFIG_KM_1_V_4_12_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 12
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (256*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_12_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.4.15
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_15_H_
+#define _RGXCONFIG_KM_1_V_4_15_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 15
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (256*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_15_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.4.19
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_19_H_
+#define _RGXCONFIG_KM_1_V_4_19_H_
+
+/***** Automatically generated file (22/02/2016 07:00:33): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:33)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 19
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_19_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.4.5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_5_H_
+#define _RGXCONFIG_KM_1_V_4_5_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_5_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 1.V.4.6
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_6_H_
+#define _RGXCONFIG_KM_1_V_4_6_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 6
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_6_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 10.V.2.26
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_10_V_2_26_H_
+#define _RGXCONFIG_KM_10_V_2_26_H_
+
+/***** Automatically generated file (15/06/2016 11:04:46): Do not edit manually ********************/
+/***** Timestamp: (15/06/2016 11:04:46)************************************************************/
+
+#define RGX_BNC_KM_B 10
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 26
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SCALABLE_VCE (1)
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_PDS_PER_DUST
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SLC_BANKS (2)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_META_COREMEM_SIZE (256)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (2)
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+
+
+#endif /* _RGXCONFIG_KM_10_V_2_26_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 10.V.4.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_10_V_4_25_H_
+#define _RGXCONFIG_KM_10_V_4_25_H_
+
+/***** Automatically generated file (15/06/2016 11:04:46): Do not edit manually ********************/
+/***** Timestamp: (15/06/2016 11:04:46)************************************************************/
+
+#define RGX_BNC_KM_B 10
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 25
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SCALABLE_VCE (1)
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_PDS_PER_DUST
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SLC_BANKS (4)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_META_COREMEM_SIZE (256)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (2)
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+
+
+#endif /* _RGXCONFIG_KM_10_V_4_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 12.V.1.20
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_12_V_1_20_H_
+#define _RGXCONFIG_KM_12_V_1_20_H_
+
+/***** Automatically generated file (24/08/2016 07:01:08): Do not edit manually ********************/
+/***** Timestamp: (24/08/2016 07:01:08)************************************************************/
+
+#define RGX_BNC_KM_B 12
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0*1024)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_12_V_1_20_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 12.V.1.48
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_12_V_1_48_H_
+#define _RGXCONFIG_KM_12_V_1_48_H_
+
+/***** Automatically generated file (24/08/2016 07:01:08): Do not edit manually ********************/
+/***** Timestamp: (24/08/2016 07:01:08)************************************************************/
+
+#define RGX_BNC_KM_B 12
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 48
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0*1024)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_12_V_1_48_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 15.V.1.64
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_15_V_1_64_H_
+#define _RGXCONFIG_KM_15_V_1_64_H_
+
+/***** Automatically generated file (24/08/2016 07:01:09): Do not edit manually ********************/
+/***** Timestamp: (24/08/2016 07:01:09)************************************************************/
+
+#define RGX_BNC_KM_B 15
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 64
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_15_V_1_64_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.208.312
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_208_312_H_
+#define _RGXCONFIG_KM_22_V_208_312_H_
+
+/***** Automatically generated file (26/08/2016 07:00:44): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:44)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 208
+#define RGX_BNC_KM_C 312
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_208_312_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.208.316
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_208_316_H_
+#define _RGXCONFIG_KM_22_V_208_316_H_
+
+/***** Automatically generated file (17/01/2017 07:00:49): Do not edit manually ********************/
+/***** Timestamp: (17/01/2017 07:00:49)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 208
+#define RGX_BNC_KM_C 316
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_208_316_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.21.11
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_21_11_H_
+#define _RGXCONFIG_KM_22_V_21_11_H_
+
+/***** Automatically generated file (14/11/2016 07:00:35): Do not edit manually ********************/
+/***** Timestamp: (14/11/2016 07:00:35)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 21
+#define RGX_BNC_KM_C 11
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_SLCSIZE8
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_21_11_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.21.16
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_21_16_H_
+#define _RGXCONFIG_KM_22_V_21_16_H_
+
+/***** Automatically generated file (08/02/2017 07:00:50): Do not edit manually ********************/
+/***** Timestamp: (08/02/2017 07:00:50)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 21
+#define RGX_BNC_KM_C 16
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_SLCSIZE8
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_21_16_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.22.22
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_22_H_
+#define _RGXCONFIG_KM_22_V_22_22_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 22
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_22_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.22.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_23_H_
+#define _RGXCONFIG_KM_22_V_22_23_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 23
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_23_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.22.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_25_H_
+#define _RGXCONFIG_KM_22_V_22_25_H_
+
+/***** Automatically generated file (08/02/2017 07:00:50): Do not edit manually ********************/
+/***** Timestamp: (08/02/2017 07:00:50)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 25
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.22.27
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_27_H_
+#define _RGXCONFIG_KM_22_V_22_27_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 27
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_27_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.22.29
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_29_H_
+#define _RGXCONFIG_KM_22_V_22_29_H_
+
+/***** Automatically generated file (08/02/2017 07:00:49): Do not edit manually ********************/
+/***** Timestamp: (08/02/2017 07:00:49)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 29
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_29_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.54.24
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_24_H_
+#define _RGXCONFIG_KM_22_V_54_24_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 24
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_24_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.54.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_25_H_
+#define _RGXCONFIG_KM_22_V_54_25_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 25
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_30_H_
+#define _RGXCONFIG_KM_22_V_54_30_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.54.328
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_328_H_
+#define _RGXCONFIG_KM_22_V_54_328_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 328
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_328_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 22.V.54.330
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_330_H_
+#define _RGXCONFIG_KM_22_V_54_330_H_
+
+/***** Automatically generated file (26/08/2016 07:00:43): Do not edit manually ********************/
+/***** Timestamp: (26/08/2016 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 330
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_330_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.2.51
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_51_H_
+#define _RGXCONFIG_KM_4_V_2_51_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 51
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_2_51_H_ */
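Editor's note: unlike the 22.V.* configs earlier in this series, which define RGX_FEATURE_MIPS, the 4.V.* configs above define RGX_FEATURE_META. A minimal illustrative sketch of compile-time selection on that distinction follows (not part of the patch); the enum and helper names are hypothetical.

enum example_fw_processor {
	EXAMPLE_FW_PROCESSOR_MIPS,
	EXAMPLE_FW_PROCESSOR_META,
	EXAMPLE_FW_PROCESSOR_UNKNOWN,
};

static inline enum example_fw_processor example_fw_processor_type(void)
{
#if defined(RGX_FEATURE_MIPS)
	return EXAMPLE_FW_PROCESSOR_MIPS;	/* e.g. the 22.V.* configs */
#elif defined(RGX_FEATURE_META)
	return EXAMPLE_FW_PROCESSOR_META;	/* e.g. BVNC 4.V.2.51 above */
#else
	return EXAMPLE_FW_PROCESSOR_UNKNOWN;
#endif
}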
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.2.57
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_57_H_
+#define _RGXCONFIG_KM_4_V_2_57_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 57
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_2_57_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.2.58
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_58_H_
+#define _RGXCONFIG_KM_4_V_2_58_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 58
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_2_58_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.4.53
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_4_53_H_
+#define _RGXCONFIG_KM_4_V_4_53_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 53
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_4_53_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.4.55
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_4_55_H_
+#define _RGXCONFIG_KM_4_V_4_55_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 55
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_4_55_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.6.62
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_6_62_H_
+#define _RGXCONFIG_KM_4_V_6_62_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 6
+#define RGX_BNC_KM_C 62
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_NUM_CLUSTERS (6)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_SLC_BANKS (4)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_4_V_6_62_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 5.V.1.46
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_5_V_1_46_H_
+#define _RGXCONFIG_KM_5_V_1_46_H_
+
+/***** Automatically generated file (24/08/2016 07:01:08): Do not edit manually ********************/
+/***** Timestamp: (24/08/2016 07:01:08)************************************************************/
+
+#define RGX_BNC_KM_B 5
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 46
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_ROGUEXE
+
+
+#endif /* _RGXCONFIG_KM_5_V_1_46_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 6.V.4.35
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_6_V_4_35_H_
+#define _RGXCONFIG_KM_6_V_4_35_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 6
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 35
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_RAY_TRACING
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* _RGXCONFIG_KM_6_V_4_35_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 8.V.2.39
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_8_V_2_39_H_
+#define _RGXCONFIG_KM_8_V_2_39_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp: (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 8
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 39
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SCALABLE_VCE (1)
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_PDS_PER_DUST
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SLC_BANKS (2)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_META_COREMEM_SIZE (64)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+
+
+#endif /* _RGXCONFIG_KM_8_V_2_39_H_ */
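Editor's note (not part of the patch): the feature lists in these config headers mix presence-only flags (e.g. RGX_FEATURE_COMPUTE, RGX_FEATURE_TLA) with valued parameters (e.g. RGX_FEATURE_NUM_CLUSTERS). A minimal sketch of how such generated defines are typically consumed at compile time follows; describe_config() is hypothetical and does not appear in this patch.

/* Hypothetical consumer of the generated feature defines; nothing below is
 * taken from the driver itself.
 */
#include <stdio.h>

#define RGX_FEATURE_NUM_CLUSTERS (2)
#define RGX_FEATURE_COMPUTE

static void describe_config(void)
{
	/* Valued defines parameterise sizing decisions... */
	printf("clusters: %d\n", RGX_FEATURE_NUM_CLUSTERS);

	/* ...while presence-only defines gate whole code paths. */
#if defined(RGX_FEATURE_COMPUTE)
	printf("compute data master supported\n");
#else
	printf("compute data master not supported\n");
#endif
}

int main(void)
{
	describe_config();
	return 0;
}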
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.33.2.5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_33_2_5_H_
+#define _RGXCORE_KM_1_33_2_5_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2106753 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.33.2.5
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 33
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_37918
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_33_2_5_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.39.4.19
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_39_4_19_H_
+#define _RGXCORE_KM_1_39_4_19_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2784771 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.39.4.19
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 39
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 19
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_39_4_19_H_ */
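Editor's note (not part of the patch): unlike the RGX Config headers, the per-core RGX Core headers list BVNC-specific errata (FIX_HW_BRN_*) and enhancements (HW_ERN_*). A rough, hypothetical sketch of how such flags are commonly used to gate workarounds and optional fast paths at compile time; both code paths below are invented for illustration and are not part of this patch.

/* Illustration only: the helper and the printed messages are invented; the
 * real workaround code is not part of this hunk.
 */
#include <stdio.h>

#define FIX_HW_BRN_44455	/* erratum present on this core */
/* HW_ERN_36400 deliberately left undefined here */

static void apply_brn_44455_workaround(void)
{
	printf("applying BRN 44455 workaround\n");
}

static void init_core(void)
{
#if defined(FIX_HW_BRN_44455)
	apply_brn_44455_workaround();	/* erratum: extra software step */
#endif
#if defined(HW_ERN_36400)
	printf("using ERN 36400 fast path\n");	/* enhancement: opt-in path */
#else
	printf("using baseline path\n");
#endif
}

int main(void)
{
	init_core();
	return 0;
}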
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.48.2.0
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_48_2_0_H_
+#define _RGXCORE_KM_1_48_2_0_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2523218 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.48.2.0
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 0
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_48_2_0_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.72.4.12
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_72_4_12_H_
+#define _RGXCORE_KM_1_72_4_12_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2646650 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.72.4.12
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 72
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 12
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_72_4_12_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.75.2.20
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_75_2_20_H_
+#define _RGXCORE_KM_1_75_2_20_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2309075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.75.2.20
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_75_2_20_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.75.2.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_75_2_30_H_
+#define _RGXCORE_KM_1_75_2_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2309075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.75.2.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_75_2_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.76.4.6
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_76_4_6_H_
+#define _RGXCORE_KM_1_76_4_6_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2318404 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.76.4.6
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 76
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 6
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42480
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_76_4_6_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.81.4.15
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_81_4_15_H_
+#define _RGXCORE_KM_1_81_4_15_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2373516 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.81.4.15
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 81
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 15
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_81_4_15_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 1.82.4.5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_82_4_5_H_
+#define _RGXCORE_KM_1_82_4_5_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @2503111 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.82.4.5
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 82
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_82_4_5_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 10.22.4.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_10_22_4_25_H_
+#define _RGXCORE_KM_10_22_4_25_H_
+
+/***** Automatically generated file (29/11/2016 07:01:41): Do not edit manually ********************/
+/***** Timestamp: (29/11/2016 07:01:41)************************************************************/
+/***** CS: @3943572 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 10.22.4.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 10
+#define RGX_BVNC_KM_V 22
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+#define HW_ERN_50539
+
+
+
+#endif /* _RGXCORE_KM_10_22_4_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 10.30.2.26
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_10_30_2_26_H_
+#define _RGXCORE_KM_10_30_2_26_H_
+
+/***** Automatically generated file (23/02/2017 15:15:18): Do not edit manually ********************/
+/***** Timestamp: (23/02/2017 15:15:18)************************************************************/
+/***** CS: @3943572 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 10.30.2.26
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 10
+#define RGX_BVNC_KM_V 30
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 26
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+#define HW_ERN_50539
+
+
+
+#endif /* _RGXCORE_KM_10_30_2_26_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 10.32.4.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_10_32_4_25_H_
+#define _RGXCORE_KM_10_32_4_25_H_
+
+/***** Automatically generated file (29/11/2016 07:01:41): Do not edit manually ********************/
+/***** Timestamp: (29/11/2016 07:01:41)************************************************************/
+/***** CS: @3976602 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 10.32.4.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 10
+#define RGX_BVNC_KM_V 32
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+#define HW_ERN_50539
+
+
+
+#endif /* _RGXCORE_KM_10_32_4_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 10.33.4.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_10_33_4_25_H_
+#define _RGXCORE_KM_10_33_4_25_H_
+
+/***** Automatically generated file (23/02/2017 15:15:18): Do not edit manually ********************/
+/***** Timestamp: (23/02/2017 15:15:18)************************************************************/
+/***** CS: @4036299 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 10.33.4.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 10
+#define RGX_BVNC_KM_V 33
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+#define HW_ERN_50539
+
+
+
+#endif /* _RGXCORE_KM_10_33_4_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 12.4.1.48
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_12_4_1_48_H_
+#define _RGXCORE_KM_12_4_1_48_H_
+
+/***** Automatically generated file (20/02/2017 07:01:18): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:18)************************************************************/
+/***** CS: @2989295 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 12.4.1.48
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 12
+#define RGX_BVNC_KM_V 4
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 48
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_12_4_1_48_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 12.5.1.20
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_12_5_1_20_H_
+#define _RGXCORE_KM_12_5_1_20_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @3146507 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 12.5.1.20
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 12
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_12_5_1_20_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 15.5.1.64
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_15_5_1_64_H_
+#define _RGXCORE_KM_15_5_1_64_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @3846532 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 15.5.1.64
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 15
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 64
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_15_5_1_64_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.18.22.22
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_18_22_22_H_
+#define _RGXCORE_KM_22_18_22_22_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @3872583 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.18.22.22
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 18
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 22
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_22_18_22_22_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.26.54.24
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_26_54_24_H_
+#define _RGXCORE_KM_22_26_54_24_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @3943204 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.26.54.24
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 26
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 24
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_26_54_24_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.28.22.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_28_22_23_H_
+#define _RGXCORE_KM_22_28_22_23_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @3969181 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.28.22.23
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 28
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_28_22_23_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.29.22.27
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_29_22_27_H_
+#define _RGXCORE_KM_22_29_22_27_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @3976753 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.29.22.27
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 29
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 27
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_29_22_27_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.30.54.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_30_54_25_H_
+#define _RGXCORE_KM_22_30_54_25_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4086500 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.30.54.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 30
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_30_54_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.32.54.328
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_32_54_328_H_
+#define _RGXCORE_KM_22_32_54_328_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4048608 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.32.54.328
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 32
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 328
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_22_32_54_328_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.33.21.11
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_33_21_11_H_
+#define _RGXCORE_KM_22_33_21_11_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4048565 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.33.21.11
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 33
+#define RGX_BVNC_KM_N 21
+#define RGX_BVNC_KM_C 11
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_33_21_11_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.35.22.27
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_35_22_27_H_
+#define _RGXCORE_KM_22_35_22_27_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4005275 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.35.22.27
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 35
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 27
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_35_22_27_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.40.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_40_54_30_H_
+#define _RGXCORE_KM_22_40_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4094817 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.40.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_40_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.41.54.330
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_41_54_330_H_
+#define _RGXCORE_KM_22_41_54_330_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4075207 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.41.54.330
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 41
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 330
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_22_41_54_330_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.44.22.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_44_22_25_H_
+#define _RGXCORE_KM_22_44_22_25_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4137146 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.44.22.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 44
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_44_22_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.45.22.29
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_45_22_29_H_
+#define _RGXCORE_KM_22_45_22_29_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4127311 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.45.22.29
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 45
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 29
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_45_22_29_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.46.54.330
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_46_54_330_H_
+#define _RGXCORE_KM_22_46_54_330_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4136505 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.46.54.330
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 46
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 330
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_22_46_54_330_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.47.208.312
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_47_208_312_H_
+#define _RGXCORE_KM_22_47_208_312_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4202467 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.47.208.312
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 47
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 312
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_22_47_208_312_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.48.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_48_54_30_H_
+#define _RGXCORE_KM_22_48_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:18): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:18)************************************************************/
+/***** CS: @4158661 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.48.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_48_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.49.21.16
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_49_21_16_H_
+#define _RGXCORE_KM_22_49_21_16_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4158766 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.49.21.16
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 49
+#define RGX_BVNC_KM_N 21
+#define RGX_BVNC_KM_C 16
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_49_21_16_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.50.22.29
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_50_22_29_H_
+#define _RGXCORE_KM_22_50_22_29_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4156423 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.50.22.29
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 50
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 29
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_50_22_29_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.55.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_55_54_30_H_
+#define _RGXCORE_KM_22_55_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4230075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.55.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 55
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_55_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.57.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_57_54_30_H_
+#define _RGXCORE_KM_22_57_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4279085 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.57.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 57
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_57_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.58.22.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_58_22_25_H_
+#define _RGXCORE_KM_22_58_22_25_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4279077 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.58.22.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 58
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_58_22_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.59.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_59_54_30_H_
+#define _RGXCORE_KM_22_59_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @4317182 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.59.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 59
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_59_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.62.21.16
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_62_21_16_H_
+#define _RGXCORE_KM_22_62_21_16_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4339985 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.62.21.16
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 62
+#define RGX_BVNC_KM_N 21
+#define RGX_BVNC_KM_C 16
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_62_21_16_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.63.54.330
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_63_54_330_H_
+#define _RGXCORE_KM_22_63_54_330_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4400526 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.63.54.330
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 63
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 330
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_63_54_330_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.67.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_67_54_30_H_
+#define _RGXCORE_KM_22_67_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:18): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:18)************************************************************/
+/***** CS: @4339986 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.67.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 67
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_67_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.68.54.30
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_68_54_30_H_
+#define _RGXCORE_KM_22_68_54_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4339984 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.68.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 68
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_68_54_30_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.69.22.25
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_69_22_25_H_
+#define _RGXCORE_KM_22_69_22_25_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4339983 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.69.22.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 69
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_69_22_25_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 22.70.208.316
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_70_208_316_H_
+#define _RGXCORE_KM_22_70_208_316_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @4476117 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.70.208.316
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 70
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 316
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_55091
+#define FIX_HW_BRN_63027
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_70_208_316_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.29.2.51
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_29_2_51_H_
+#define _RGXCORE_KM_4_29_2_51_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @2944502 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.29.2.51
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 29
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_29_2_51_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.31.4.55
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_31_4_55_H_
+#define _RGXCORE_KM_4_31_4_55_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @2919104 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.31.4.55
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 31
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 55
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_31_4_55_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.40.2.51
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_40_2_51_H_
+#define _RGXCORE_KM_4_40_2_51_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3254374 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.40.2.51
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_40_2_51_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.41.2.57
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_41_2_57_H_
+#define _RGXCORE_KM_4_41_2_57_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:05)************************************************************/
+/***** CS: @3254338 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.41.2.57
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 41
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 57
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_41_2_57_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.42.4.53
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_42_4_53_H_
+#define _RGXCORE_KM_4_42_4_53_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3250390 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.42.4.53
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 42
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 53
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_42_4_53_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.43.6.62
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_43_6_62_H_
+#define _RGXCORE_KM_4_43_6_62_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3253129 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.43.6.62
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 43
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_43_6_62_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.45.2.58
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_45_2_58_H_
+#define _RGXCORE_KM_4_45_2_58_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3547765 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.45.2.58
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 45
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 58
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_45_2_58_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.46.6.62
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_46_6_62_H_
+#define _RGXCORE_KM_4_46_6_62_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:05)************************************************************/
+/***** CS: @4015666 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.46.6.62
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 46
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_46_6_62_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 5.11.1.46
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_5_11_1_46_H_
+#define _RGXCORE_KM_5_11_1_46_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:20)************************************************************/
+/***** CS: @3485232 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 5.11.1.46
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 11
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_5_11_1_46_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 5.9.1.46
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_5_9_1_46_H_
+#define _RGXCORE_KM_5_9_1_46_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp: (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2967148 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 5.9.1.46
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 9
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_5_9_1_46_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 6.34.4.35
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_6_34_4_35_H_
+#define _RGXCORE_KM_6_34_4_35_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:05)************************************************************/
+/***** CS: @3533654 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 6.34.4.35
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 6
+#define RGX_BVNC_KM_V 34
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 35
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_6_34_4_35_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 8.47.2.39
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_8_47_2_39_H_
+#define _RGXCORE_KM_8_47_2_39_H_
+
+/***** Automatically generated file (23/02/2017 15:15:18): Do not edit manually ********************/
+/***** Timestamp: (23/02/2017 15:15:18)************************************************************/
+/***** CS: @3673034 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 8.47.2.39
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 8
+#define RGX_BVNC_KM_V 47
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 39
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_52563
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+
+
+
+#endif /* _RGXCORE_KM_8_47_2_39_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 8.48.2.39
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_8_48_2_39_H_
+#define _RGXCORE_KM_8_48_2_39_H_
+
+/***** Automatically generated file (23/02/2017 15:15:18): Do not edit manually ********************/
+/***** Timestamp: (23/02/2017 15:15:18)************************************************************/
+/***** CS: @3753485 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 8.48.2.39
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 8
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 39
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_52563
+#define FIX_HW_BRN_62204
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+
+
+
+#endif /* _RGXCORE_KM_8_48_2_39_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_defs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/**************************************************
+* Auto generated file by BVNCTableGen.py *
+* This file should not be edited manually *
+**************************************************/
+
+#ifndef _RGX_BVNC_DEFS_KM_H_
+#define _RGX_BVNC_DEFS_KM_H_
+
+#include "img_types.h"
+
+#define BVNC_FIELD_WIDTH (16U)
+
+#define RGX_FEATURE_AXI_ACELITE_POS (0U)
+#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define RGX_FEATURE_COMPUTE_POS (2U)
+#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_POS (4U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (5U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (6U)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+#define RGX_FEATURE_FASTRENDER_DM_POS (7U)
+#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000080))
+
+#define RGX_FEATURE_GPU_CPU_COHERENCY_POS (8U)
+#define RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK (IMG_UINT64_C(0x0000000000000100))
+
+#define RGX_FEATURE_GPU_VIRTUALISATION_POS (9U)
+#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000000200))
+
+#define RGX_FEATURE_GS_RTA_SUPPORT_POS (10U)
+#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000400))
+
+#define RGX_FEATURE_META_DMA_POS (11U)
+#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000000800))
+
+#define RGX_FEATURE_MIPS_POS (12U)
+#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000001000))
+
+#define RGX_FEATURE_PBE2_IN_XE_POS (13U)
+#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000002000))
+
+#define RGX_FEATURE_PBVNC_COREID_REG_POS (14U)
+#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000004000))
+
+#define RGX_FEATURE_PDS_PER_DUST_POS (15U)
+#define RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000008000))
+
+#define RGX_FEATURE_PDS_TEMPSIZE8_POS (16U)
+#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000010000))
+
+#define RGX_FEATURE_PERFBUS_POS (17U)
+#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
+
+#define RGX_FEATURE_RAY_TRACING_POS (18U)
+#define RGX_FEATURE_RAY_TRACING_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define RGX_FEATURE_ROGUEXE_POS (19U)
+#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (20U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
+
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (21U)
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000200000))
+
+#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (22U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000000400000))
+
+#define RGX_FEATURE_SIGNAL_SNOOPING_POS (23U)
+#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000000800000))
+
+#define RGX_FEATURE_SINGLE_BIF_POS (24U)
+#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000001000000))
+
+#define RGX_FEATURE_SLCSIZE8_POS (25U)
+#define RGX_FEATURE_SLCSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000002000000))
+
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (26U)
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000000004000000))
+
+#define RGX_FEATURE_SLC_VIVT_POS (27U)
+#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000000008000000))
+
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (28U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000000010000000))
+
+#define RGX_FEATURE_TESSELLATION_POS (29U)
+#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000000020000000))
+
+#define RGX_FEATURE_TLA_POS (30U)
+#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000000040000000))
+
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (31U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000000080000000))
+
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (32U)
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000000100000000))
+
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (33U)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0000000200000000))
+
+#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (34U)
+#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0000000400000000))
+
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (35U)
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0000000800000000))
+
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (36U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000001000000000))
+
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS (0U)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000000000003))
+
+#define RGX_FEATURE_FBCDC_ARCHITECTURE_POS (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK (IMG_UINT64_C(0x000000000000000C))
+
+#define RGX_FEATURE_META_POS (4U)
+#define RGX_FEATURE_META_BIT_MASK (IMG_UINT64_C(0x0000000000000030))
+
+#define RGX_FEATURE_META_COREMEM_BANKS_POS (6U)
+#define RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK (IMG_UINT64_C(0x00000000000001C0))
+
+#define RGX_FEATURE_META_COREMEM_SIZE_POS (9U)
+#define RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK (IMG_UINT64_C(0x0000000000000E00))
+
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS (12U)
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK (IMG_UINT64_C(0x0000000000003000))
+
+#define RGX_FEATURE_NUM_CLUSTERS_POS (14U)
+#define RGX_FEATURE_NUM_CLUSTERS_BIT_MASK (IMG_UINT64_C(0x000000000003C000))
+
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES_POS (18U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK (IMG_UINT64_C(0x00000000003C0000))
+
+#define RGX_FEATURE_PHYS_BUS_WIDTH_POS (22U)
+#define RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK (IMG_UINT64_C(0x0000000000C00000))
+
+#define RGX_FEATURE_SCALABLE_TE_ARCH_POS (24U)
+#define RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK (IMG_UINT64_C(0x0000000003000000))
+
+#define RGX_FEATURE_SCALABLE_VCE_POS (26U)
+#define RGX_FEATURE_SCALABLE_VCE_BIT_MASK (IMG_UINT64_C(0x000000000C000000))
+
+#define RGX_FEATURE_SLC_BANKS_POS (28U)
+#define RGX_FEATURE_SLC_BANKS_BIT_MASK (IMG_UINT64_C(0x0000000030000000))
+
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS (30U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK (IMG_UINT64_C(0x0000000040000000))
+
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES_POS (31U)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES_BIT_MASK (IMG_UINT64_C(0x0000000380000000))
+
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS (31U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK (IMG_UINT64_C(0x0000000380000000))
+
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS (36U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK (IMG_UINT64_C(0x0000001000000000))
+
+#define HW_ERN_36400_POS (0U)
+#define HW_ERN_36400_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define FIX_HW_BRN_37200_POS (1U)
+#define FIX_HW_BRN_37200_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define FIX_HW_BRN_37918_POS (2U)
+#define FIX_HW_BRN_37918_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define FIX_HW_BRN_38344_POS (3U)
+#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define HW_ERN_41805_POS (4U)
+#define HW_ERN_41805_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define HW_ERN_42290_POS (5U)
+#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define FIX_HW_BRN_42321_POS (6U)
+#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+#define FIX_HW_BRN_42480_POS (7U)
+#define FIX_HW_BRN_42480_BIT_MASK (IMG_UINT64_C(0x0000000000000080))
+
+#define HW_ERN_42606_POS (8U)
+#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000100))
+
+#define FIX_HW_BRN_43276_POS (9U)
+#define FIX_HW_BRN_43276_BIT_MASK (IMG_UINT64_C(0x0000000000000200))
+
+#define FIX_HW_BRN_44455_POS (10U)
+#define FIX_HW_BRN_44455_BIT_MASK (IMG_UINT64_C(0x0000000000000400))
+
+#define FIX_HW_BRN_44871_POS (11U)
+#define FIX_HW_BRN_44871_BIT_MASK (IMG_UINT64_C(0x0000000000000800))
+
+#define HW_ERN_44885_POS (12U)
+#define HW_ERN_44885_BIT_MASK (IMG_UINT64_C(0x0000000000001000))
+
+#define HW_ERN_45914_POS (13U)
+#define HW_ERN_45914_BIT_MASK (IMG_UINT64_C(0x0000000000002000))
+
+#define HW_ERN_46066_POS (14U)
+#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000004000))
+
+#define HW_ERN_47025_POS (15U)
+#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000008000))
+
+#define HW_ERN_49144_POS (16U)
+#define HW_ERN_49144_BIT_MASK (IMG_UINT64_C(0x0000000000010000))
+
+#define HW_ERN_50539_POS (17U)
+#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
+
+#define FIX_HW_BRN_50767_POS (18U)
+#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define FIX_HW_BRN_51281_POS (19U)
+#define FIX_HW_BRN_51281_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define HW_ERN_51468_POS (20U)
+#define HW_ERN_51468_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
+
+#define FIX_HW_BRN_52402_POS (21U)
+#define FIX_HW_BRN_52402_BIT_MASK (IMG_UINT64_C(0x0000000000200000))
+
+#define FIX_HW_BRN_52563_POS (22U)
+#define FIX_HW_BRN_52563_BIT_MASK (IMG_UINT64_C(0x0000000000400000))
+
+#define FIX_HW_BRN_54141_POS (23U)
+#define FIX_HW_BRN_54141_BIT_MASK (IMG_UINT64_C(0x0000000000800000))
+
+#define FIX_HW_BRN_54441_POS (24U)
+#define FIX_HW_BRN_54441_BIT_MASK (IMG_UINT64_C(0x0000000001000000))
+
+#define FIX_HW_BRN_55091_POS (25U)
+#define FIX_HW_BRN_55091_BIT_MASK (IMG_UINT64_C(0x0000000002000000))
+
+#define FIX_HW_BRN_57193_POS (26U)
+#define FIX_HW_BRN_57193_BIT_MASK (IMG_UINT64_C(0x0000000004000000))
+
+#define HW_ERN_57596_POS (27U)
+#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000008000000))
+
+#define FIX_HW_BRN_60084_POS (28U)
+#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000010000000))
+
+#define HW_ERN_61389_POS (29U)
+#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000020000000))
+
+#define FIX_HW_BRN_61450_POS (30U)
+#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000040000000))
+
+#define FIX_HW_BRN_62204_POS (31U)
+#define FIX_HW_BRN_62204_BIT_MASK (IMG_UINT64_C(0x0000000080000000))
+
+#define FIX_HW_BRN_63027_POS (32U)
+#define FIX_HW_BRN_63027_BIT_MASK (IMG_UINT64_C(0x0000000100000000))
+
+#define FIX_HW_BRN_63142_POS (33U)
+#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000200000000))
+
+
+
+#endif /*_RGX_BVNC_DEFS_KM_H_ */
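For reference, the *_POS/*_BIT_MASK pairs above follow the usual mask-and-shift convention: a single-bit feature is tested by ANDing its mask against the per-core feature word, and a multi-bit field is masked and then shifted down by its *_POS. The sketch below is illustrative only and is not part of the patch; the variable names, and the split of the boolean flags and the encoded value fields into two separate 64-bit words (as the three-column gaFeatures table in the next file suggests), are assumptions.

	/* A minimal sketch; IMG_UINT32/IMG_UINT64 come from img_types.h and the
	 * RGX_FEATURE_* macros from the header above. */
	static void rgx_feature_query_example(IMG_UINT64 ui64FeatureFlags,
					      IMG_UINT64 ui64FeatureValues)
	{
		IMG_UINT32 ui32ClustersField;

		/* Single-bit feature: AND with the mask. */
		if (ui64FeatureFlags & RGX_FEATURE_TLA_BIT_MASK) {
			/* the core reports the TLA feature */
		}

		/* Multi-bit field: mask, then shift down by its _POS. */
		ui32ClustersField = (IMG_UINT32)((ui64FeatureValues &
				     RGX_FEATURE_NUM_CLUSTERS_BIT_MASK) >>
				     RGX_FEATURE_NUM_CLUSTERS_POS);
		(void)ui32ClustersField;
	}

The HW_ERN_*/FIX_HW_BRN_* pairs are consumed in the same way against the errata word.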
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_table_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/**************************************************
+* Auto generated file by BVNCTableGen.py *
+* This file should not be edited manually *
+**************************************************/
+
+#ifndef _RGX_BVNC_TABLE_KM_H_
+#define _RGX_BVNC_TABLE_KM_H_
+
+#include "img_types.h"
+#include "rgxdefs_km.h"
+
+#define CSF_MAX_VALUE (1)
+static IMG_UINT32 CSF[] = {2, };
+
+#define FBCDCArch_MAX_VALUE (3)
+static IMG_UINT32 FBCDCArch[] = {1, 2, 3, };
+
+#define MCRMB_MAX_VALUE (2)
+static IMG_UINT32 MCRMB[] = {3, 8, };
+
+#define MCRMS_MAX_VALUE (5)
+static IMG_UINT32 MCRMS[] = {0, 32, 48, 64, 256, };
+
+#define MDCC_MAX_VALUE (1)
+static IMG_UINT32 MDCC[] = {4, };
+
+#define NC_MAX_VALUE (7)
+static IMG_UINT32 NC[] = {1, 2, 4, 6, 8, 12, 16, };
+
+#define NIIP_MAX_VALUE (11)
+static IMG_UINT32 NIIP[] = {1, 2, 3, 4, 6, 7, 8, 12, 16, 24, 32, };
+
+#define PBW_MAX_VALUE (3)
+static IMG_UINT32 PBW[] = {32, 38, 40, };
+
+#define STEA_MAX_VALUE (3)
+static IMG_UINT32 STEA[] = {1, 2, 4, };
+
+#define SVCEA_MAX_VALUE (4)
+static IMG_UINT32 SVCEA[] = {1, 2, 3, 4, };
+
+#define SLCB_MAX_VALUE (3)
+static IMG_UINT32 SLCB[] = {1, 2, 4, };
+
+#define SLCCLSb_MAX_VALUE (2)
+static IMG_UINT32 SLCCLSb[] = {512, 1024, };
+
+#define SLCSKB_MAX_VALUE (7)
+static IMG_UINT32 SLCSKB[] = {0, 16, 32, 64, 128, 256, 512, };
+
+#define VASB_MAX_VALUE (1)
+static IMG_UINT32 VASB[] = {40, };
+
+#define META_MAX_VALUE (4)
+static IMG_UINT32 META[] = {LTP217, LTP218, MTP218, MTP219, };
+
+
+IMG_UINT64 gaFeatures[][3]=
+{
+ { 0x0001000000020000, 0x0000000040020415, 0x0000000200884020, },
+ { 0x0001000000020005, 0x0000000040020415, 0x0000000200884020, },
+ { 0x0001000000020014, 0x0000000040020415, 0x0000000200884020, },
+ { 0x000100000002001e, 0x0000000040020415, 0x0000000200884020, },
+ { 0x0001000000040005, 0x0000000040020414, 0x0000000200888020, },
+ { 0x0001000000040006, 0x0000000040020414, 0x0000000200888020, },
+ { 0x000100000004000c, 0x0000000040020414, 0x0000000280888020, },
+ { 0x000100000004000f, 0x0000000040020415, 0x0000000280888020, },
+ { 0x0001000000040013, 0x0000000040020415, 0x0000000200888020, },
+ { 0x0004000000020033, 0x00000012c002045f, 0x0000000200984214, },
+ { 0x0004000000020039, 0x00000012c002065f, 0x0000000200984214, },
+ { 0x000400000002003a, 0x00000012c002065f, 0x0000000200984214, },
+ { 0x0004000000040035, 0x00000012c002045f, 0x0000000200988214, },
+ { 0x0004000000040037, 0x00000012c002045e, 0x0000000200988214, },
+ { 0x000400000006003e, 0x00000012c002065f, 0x000000022098c214, },
+ { 0x000500000001002e, 0x00000000000a0445, 0x0000000080800000, },
+ { 0x0006000000040023, 0x00000012c006045f, 0x0000000200988214, },
+ { 0x0008000000020027, 0x0000000fa8738e3f, 0x00000002108c4658, },
+ { 0x000a00000002001a, 0x0000000fbcf38ebf, 0x0000000210984878, },
+ { 0x000a000000040019, 0x0000000fbcf38ebf, 0x0000000320988878, },
+ { 0x000c000000010014, 0x0000000000080005, 0x0000000000800000, },
+ { 0x000c000000010030, 0x0000000000080005, 0x0000000000800000, },
+ { 0x000f000000010040, 0x00000000000a0645, 0x0000000180840000, },
+ { 0x001600000015000b, 0x00000000130a7605, 0x0000000000000000, },
+ { 0x0016000000150010, 0x00000000130a7605, 0x0000000000040000, },
+ { 0x0016000000160016, 0x00000000010a7605, 0x0000000080000000, },
+ { 0x0016000000160017, 0x00000000110a7605, 0x0000000080000000, },
+ { 0x0016000000160019, 0x00000000110a7605, 0x0000000080040000, },
+ { 0x001600000016001b, 0x00000000110a7605, 0x0000000080000000, },
+ { 0x001600000016001d, 0x00000000110a7605, 0x0000000080040000, },
+ { 0x0016000000360018, 0x00000000010a7605, 0x0000000180080000, },
+ { 0x0016000000360019, 0x00000000110a7605, 0x0000000180080000, },
+ { 0x001600000036001e, 0x00000000110a7605, 0x00000001800c0000, },
+ { 0x0016000000360148, 0x00000000110a7605, 0x00000001800c0000, },
+ { 0x001600000036014a, 0x00000000110a7605, 0x00000001800c0000, },
+ { 0x0016000000d00138, 0x00000000110a7605, 0x00000001800c4000, },
+ { 0x0016000000d0013c, 0x00000000110a7605, 0x00000001801c4000, },
+};
+
+IMG_UINT64 gaErnsBrns[][2]=
+{
+ { 0x0001002100020005, 0x000000000100044c, },
+ { 0x0001002700040013, 0x0000000001000449, },
+ { 0x0001003000020000, 0x0000000001000449, },
+ { 0x000100480004000c, 0x0000000001000401, },
+ { 0x0001004b00020014, 0x0000000001000441, },
+ { 0x0001004b0002001e, 0x0000000001000441, },
+ { 0x0001004c00040006, 0x0000000001000489, },
+ { 0x000100510004000f, 0x0000000001000449, },
+ { 0x0001005200040005, 0x0000000001000401, },
+ { 0x0004001d00020033, 0x0000000205040121, },
+ { 0x0004001f00040037, 0x0000000205040121, },
+ { 0x0004002800020033, 0x0000000205040121, },
+ { 0x0004002900020039, 0x0000000205040121, },
+ { 0x0004002a00040035, 0x0000000205040121, },
+ { 0x0004002b0006003e, 0x0000000205040121, },
+ { 0x0004002d0002003a, 0x0000000205000121, },
+ { 0x0004002e0006003e, 0x0000000204040121, },
+ { 0x000500090001002e, 0x0000000000000a09, },
+ { 0x0005000b0001002e, 0x0000000000000a41, },
+ { 0x0006002200040023, 0x0000000205000121, },
+ { 0x0008002f00020027, 0x000000008040e121, },
+ { 0x0008003000020027, 0x000000008040e121, },
+ { 0x000a001600040019, 0x000000008002e121, },
+ { 0x000a001e0002001a, 0x000000008002e121, },
+ { 0x000a002000040019, 0x000000008002e121, },
+ { 0x000a002100040019, 0x000000008002e121, },
+ { 0x000c000400010030, 0x0000000000000a09, },
+ { 0x000c000500010014, 0x0000000000000a01, },
+ { 0x000f000500010040, 0x0000000000000a01, },
+ { 0x0016001200160016, 0x0000000152000b01, },
+ { 0x0016001a00360018, 0x000000015a000b01, },
+ { 0x0016001c00160017, 0x000000015a000b01, },
+ { 0x0016001d0016001b, 0x000000015a000b01, },
+ { 0x0016001e00360019, 0x000000015a000b01, },
+ { 0x0016002000360148, 0x0000000152000b21, },
+ { 0x001600210015000b, 0x000000015a000b01, },
+ { 0x001600230016001b, 0x000000015a000b01, },
+ { 0x001600280036001e, 0x000000015a000b01, },
+ { 0x001600290036014a, 0x0000000152000b21, },
+ { 0x0016002c00160019, 0x000000015a000b01, },
+ { 0x0016002d0016001d, 0x000000015a000b01, },
+ { 0x0016002e0036014a, 0x0000000152000b21, },
+ { 0x0016002f00d00138, 0x0000000142000121, },
+ { 0x001600300036001e, 0x000000015a000b01, },
+ { 0x0016003100150010, 0x000000015a000101, },
+ { 0x001600320016001d, 0x000000015a000b01, },
+ { 0x001600370036001e, 0x000000011a000b01, },
+ { 0x001600390036001e, 0x000000013a000b01, },
+ { 0x0016003a00160019, 0x000000012a000b01, },
+ { 0x0016003b0036001e, 0x0000000008000101, },
+ { 0x0016003b0036001e, 0x000000012a000b01, },
+ { 0x0016003e00150010, 0x000000012a000101, },
+ { 0x0016003e00150010, 0x0000000028000101, },
+ { 0x0016003f0036014a, 0x0000000122000b21, },
+ { 0x0016003f0036014a, 0x0000000022000b21, },
+ { 0x001600430036001e, 0x000000013a000b01, },
+ { 0x001600440036001e, 0x000000012a000b01, },
+ { 0x0016004500160019, 0x000000012a000b01, },
+ { 0x0016004600d0013c, 0x0000000122000121, },
+};
+
+
+#endif /*_RGX_BVNC_TABLE_KM_H_ */
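The gaFeatures and gaErnsBrns tables are keyed on the 64-bit value in column 0, which from the entries appears to pack B, V, N and C into four 16-bit fields (V is left at zero in gaFeatures). A minimal lookup sketch under that assumption follows; it is not part of the patch, and the function name and key layout are inferred rather than taken from the driver.

	#include <linux/kernel.h>	/* ARRAY_SIZE, NULL */
	#include "img_types.h"		/* IMG_UINT32, IMG_UINT64 */

	/* gaFeatures comes from rgx_bvnc_table_km.h above.
	 * Assumed key layout: bits 63:48 = B, 47:32 = V, 31:16 = N, 15:0 = C. */
	static const IMG_UINT64 *rgx_find_feature_row(IMG_UINT32 ui32B,
						      IMG_UINT32 ui32N,
						      IMG_UINT32 ui32C)
	{
		IMG_UINT64 ui64Key = ((IMG_UINT64)ui32B << 48) |
				     ((IMG_UINT64)ui32N << 16) |
				      (IMG_UINT64)ui32C;
		IMG_UINT32 i;

		for (i = 0; i < ARRAY_SIZE(gaFeatures); i++) {
			if (gaFeatures[i][0] == ui64Key)
				return gaFeatures[i]; /* columns 1 and 2 are the feature words */
		}

		return NULL;
	}

The rgx_cr_defs_km.h header that follows uses the matching SHIFT/CLRMSK naming for register fields, where a field is typically updated with a (value & FIELD_CLRMSK) | FIELD_EN style read-modify-write.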
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_cr_defs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+#if !defined(__IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#ifndef _RGX_CR_DEFS_KM_H_
+#define _RGX_CR_DEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 1
+
+/*
+ Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_PBE_PERF_INDIRECT
+*/
+#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U)
+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_TPU_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U)
+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_USC_PERF_INDIRECT
+*/
+#define RGX_CR_USC_PERF_INDIRECT (0x8030U)
+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U)
+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_TEXAS3_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_BX_TU_PERF_INDIRECT
+*/
+#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL (0x0000U)
+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F))
+#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL_TE_SHIFT (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+ Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS (0x0008U)
+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL (IMG_UINT64_C(0x00000001B3101773))
+#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS_TE_SHIFT (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID__PBVNC (0x0020U)
+#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U)
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U)
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U)
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK (0X0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0X00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000))
+
+
+/*
+ Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U)
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U)
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET (0x0100U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFE7FFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_SHIFT (63U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_SHIFT (62U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_EN (IMG_UINT64_C(0X4000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_SHIFT (61U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0XDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_EN (IMG_UINT64_C(0X2000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_SHIFT (60U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_CLRMSK (IMG_UINT64_C(0XEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_EN (IMG_UINT64_C(0X1000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_SHIFT (59U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_CLRMSK (IMG_UINT64_C(0XF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_EN (IMG_UINT64_C(0X0800000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_SHIFT (58U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_CLRMSK (IMG_UINT64_C(0XFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_EN (IMG_UINT64_C(0X0400000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_SHIFT (57U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_CLRMSK (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_EN (IMG_UINT64_C(0X0200000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_SHIFT (56U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_CLRMSK (IMG_UINT64_C(0XFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_EN (IMG_UINT64_C(0X0100000000000000))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0XFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0X0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0XFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0X0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0XFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0X0020000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0XFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0X0002000000000000))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0X0001000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0XFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0X0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0XFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0X0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0XFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0X0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0XFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0X0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0XFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0X0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0X0000020000000000))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0X0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0X0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0X0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0X0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0X0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0X0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2 (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U)
+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U)
+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_SOFT_RESET2_TDM_EN (0X00000800U)
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN (0X00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0X00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN (0X00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN (0X00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0X00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0X00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0X00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0X00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN (0X00000004U)
+#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U)
+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_CDM_EN (0X00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS (0x0130U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0X7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0X80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0X40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0X20000000U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0X10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0X08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0X04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0X02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0X01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0XFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0X00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0X00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0X00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0X00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0X00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0X00020000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0X00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0X00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0XFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0X00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0X00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0X00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0X00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0X00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0X00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0X00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0X00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0X00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0X00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0X00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0X00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0X00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0X00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0X00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0X00000001U)
+
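+/*
+ * Editorial usage sketch (not part of the generated register description):
+ * the *_EN values above are single-bit masks, so event bits can be tested
+ * directly against a 32-bit read of RGX_CR_EVENT_STATUS. The helper below is
+ * hypothetical; "regs" is assumed to be an ioremap()'d register base, with
+ * readl() and u32 coming from <linux/io.h> and <linux/types.h>.
+ */
+#if 0 /* illustrative only */
+static inline bool rgx_example_mmu_fault_pending(void __iomem *regs)
+{
+	u32 status = readl(regs + RGX_CR_EVENT_STATUS);
+
+	return (status & RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN) != 0;
+}
+#endif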
+
+/*
+ Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER (0x0160U)
+#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0XFFFF000000000000))
+
+
+/*
+ Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0X0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0XFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0X00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0X00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0X00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0X00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0X00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0X00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0XFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0X00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0X00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_CONFIG
+*/
+#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000000010F01FFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF1FFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x00000000FFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x00000000FFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x00000000FFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x00000000FFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x00000000FFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0X00000001U)
+
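+/*
+ * Editorial usage sketch (not part of the generated register description):
+ * a clear register like this is typically acknowledged by writing the event
+ * bit back to it. The helper is hypothetical; "regs" is assumed to be an
+ * ioremap()'d register base, with writel() coming from <linux/io.h>.
+ */
+#if 0 /* illustrative only */
+static inline void rgx_example_ack_mips_wrapper_irq(void __iomem *regs)
+{
+	writel(RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
+	       regs + RGX_CR_MIPS_WRAPPER_IRQ_CLEAR);
+}
+#endif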
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_DEBUG_CONFIG
+*/
+#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_EXCEPTION_STATUS
+*/
+#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0X00000020U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0X00000010U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0X00000008U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0X00000004U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0X00000002U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0X00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0X00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0X3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0X20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0X10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0X04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0X02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0X01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0XFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0X00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0X00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0XFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0X00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0X00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0X0000000FU)
+
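+/*
+ * Editorial usage sketch (not part of the generated register description):
+ * the enumerated values above are pre-shifted, so a field is programmed by
+ * clearing it with its _CLRMSK and OR-ing in the chosen value, e.g. kicking
+ * a counted task on data master 2. The helper is hypothetical; "regs" is
+ * assumed to be an ioremap()'d register base (writel() from <linux/io.h>).
+ */
+#if 0 /* illustrative only */
+static inline void rgx_example_mts_kick_dm2(void __iomem *regs)
+{
+	u32 val = 0;
+
+	val = (val & RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_DM_DM2;
+	val = (val & RGX_CR_MTS_SCHEDULE_TASK_CLRMSK) |
+	      RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+	writel(val, regs + RGX_CR_MTS_SCHEDULE);
+}
+#endif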
+
+/*
+ Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1 (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2 (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3 (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4 (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5 (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6 (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7 (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0XC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0XFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0XFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0XFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0XFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0X00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_PPP
+*/
+#define RGX_CR_PPP (0x0CD0U)
+#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CHECKSUM_SHIFT (0U)
+#define RGX_CR_PPP_CHECKSUM_CLRMSK (00000000U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U)
+/*
+ Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U)
+/*
+ Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U)
+/*
+ Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U)
+/*
+ Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U)
+/*
+ Normal render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U)
+/*
+ Fast 2D render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U)
+/*
+ Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U)
+
+
+/*
+ Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN (0X00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR (00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL (0X00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR (0X00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL (0X0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0XFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM (00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0X00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0X00000003U)
+
+
+/*
+ Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x0000000007FFF3FF))
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0X04000000U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0X02000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0XFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0X00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0X01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0XFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0X00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0X00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0X00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0X00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0X00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0X00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0X00002000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0X00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0X00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0X00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0X00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0X00007000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0X00008000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0X00009000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0X0000A000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0X0000B000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0X0000C000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0X0000D000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0X0000E000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0X0000F000U)
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0XFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0X00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN (0X00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN (0X00000001U)
+
+
+/*
+ Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats
+*/
+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64)
+/*
+ Register RGX_CR_ISP_XTP_RESUME0
+*/
+#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U)
+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0XFFFFFC00U)
+
+
+/*
+ Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats
+*/
+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32)
+/*
+ Register RGX_CR_ISP_XTP_STORE0
+*/
+#define RGX_CR_ISP_XTP_STORE0 (0x3C00U)
+#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0X40000000U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0X20000000U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0X10000000U)
+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U)
+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0XF0FFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0XFFFFFC00U)
+
+
+/*
+ Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8)
+/*
+ Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0 (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1 (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2 (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3 (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4 (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5 (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6 (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7 (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U)
+
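+/*
+ Illustrative sketch (not part of the original register header): the eight
+ RGX_CR_BIF_CAT_BASE registers above form a repeat group with an 8-byte
+ stride (0x1200, 0x1208, ..., 0x1238), so the offset for catalogue index i
+ can be computed instead of being looked up.  The value helper assumes the
+ usual ALIGNSHIFT convention, i.e. that the ADDR field carries a 4KB-aligned
+ address (ADDR_ALIGNSIZE is 4096); both helper names are made up here and
+ the IMG_UINT32/IMG_UINT64 types are assumed from img_types.h.
+*/
+#if 0	/* example only */
+static inline IMG_UINT32 ExampleBIFCatBaseRegOffset(IMG_UINT32 ui32CatIndex)
+{
+	return RGX_CR_BIF_CAT_BASE0 +
+	       (ui32CatIndex * (RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0));
+}
+
+static inline IMG_UINT64 ExampleBIFCatBaseRegValue(IMG_UINT64 ui64PCPhysAddr)
+{
+	/* Keep only bits 12..39 of the aligned address, as MASKFULL allows. */
+	return ((ui64PCPhysAddr >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+		 << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
+	       ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK;
+}
+#endif
+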
+
+/*
+ Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x0007070707070707))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0X00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0X00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0X00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0X00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL (0x12A8U)
+#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0X00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0X00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0X00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0X00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0X00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0X00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0X00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
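+/*
+ Illustrative sketch (not part of the original register header): a decode of
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS, assuming the usual convention that the
+ ADDRESS field holds the faulting address divided by ADDRESS_ALIGNSIZE
+ (16 bytes).  The raw 64-bit register value is taken as an argument so no
+ read helper has to be invented; the function name is made up here.
+*/
+#if 0	/* example only */
+static inline IMG_UINT64 ExampleBIFFaultAddress(IMG_UINT64 ui64ReqStatus)
+{
+	IMG_UINT64 ui64Field;
+
+	ui64Field = (ui64ReqStatus &
+		     ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+		    RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT;
+
+	/* Scale back up to a byte address (16-byte aligned). */
+	return ui64Field << RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+}
+#endif
+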
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0X10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0XF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0XFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0XFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0X00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0X00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U)
+
+
+/*
+ Register RGX_CR_SPFILTER_SIGNAL_DESCR
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0XFFFF0000U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF00FF0107))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x00000000800007FF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0X7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0X80000000U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0X00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0X00000200U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0X00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0X00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0X00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0X00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0X00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0X00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0X00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0X00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0 (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0X00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0X00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0X00000001U)
+
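+/*
+ Illustrative sketch (not part of the original register header): based only
+ on the field names above, a full SLC flush+invalidate would be requested by
+ writing the ALL bit of RGX_CR_SLC_CTRL_FLUSH_INVAL and then polling the
+ pending bits in RGX_CR_SLC_STATUS0.  The iomem base pointer, the use of
+ writel()/readl() from <linux/io.h> and the timeout-free poll loop are
+ assumptions made for the sake of the example, not the driver's actual
+ sequence; the function name is made up here.
+*/
+#if 0	/* example only */
+static inline void ExampleSLCFlushInvalAll(void __iomem *pvRegBank)
+{
+	/* Request a flush and invalidate of the whole SLC. */
+	writel(RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN,
+	       pvRegBank + RGX_CR_SLC_CTRL_FLUSH_INVAL);
+
+	/* Wait until neither a flush nor an invalidate is still pending. */
+	while (readl(pvRegBank + RGX_CR_SLC_STATUS0) &
+	       (RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN |
+		RGX_CR_SLC_STATUS0_INVAL_PENDING_EN |
+		RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN))
+		;
+}
+#endif
+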
+
+/*
+ Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (0X08000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (0X04000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (0X02000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (0X01000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (0XFF7FFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (0X00800000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (0X00400000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (0X00200000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (0X00100000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (0X00080000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (0X00040000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (0X00020000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (0X00010000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (0XFFFF7FFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (0X00008000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (0X00004000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (0X00002000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (0X00001000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (0X00000800U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (0X00000400U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (0X00000200U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (0X00000100U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (0X00000080U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (0X00000040U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (0X00000020U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (0X00000010U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (0X00000008U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (0X00000004U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (0X00000002U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1 (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE (0x3898U)
+#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN (0X00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0X00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0X00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0X00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0X00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0X00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0X00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2 (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2 (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_USC_UVS0_CHECKSUM
+*/
+#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U)
+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS1_CHECKSUM
+*/
+#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U)
+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS2_CHECKSUM
+*/
+#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U)
+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS3_CHECKSUM
+*/
+#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U)
+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PPP_SIGNATURE
+*/
+#define RGX_CR_PPP_SIGNATURE (0x5020U)
+#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TE_SIGNATURE
+*/
+#define RGX_CR_TE_SIGNATURE (0x5028U)
+#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TFPU_PLANE0_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TFPU_PLANE1_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PDS_DOUTM_STM_SIGNATURE
+*/
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS4_CHECKSUM
+*/
+#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U)
+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS5_CHECKSUM
+*/
+#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U)
+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00001FFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0XFFFFE01FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+ Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF
+*/
+#define RGX_CR_TA_PERF (0x7600U)
+#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TA_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TA_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TA_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TA_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
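+/*
+ Usage sketch (illustrative only): RGX_CR_TA_PERF gates the four TA
+ performance counters. One plausible enable-and-clear sequence is shown
+ below; pvRegs is an assumed __iomem mapping of the register bank, 32-bit
+ MMIO access is assumed to be acceptable for this register, and the exact
+ semantics of the CLR_* bits are not documented in this header.
+
+   writel(RGX_CR_TA_PERF_CTRL_ENABLE_EN |
+          RGX_CR_TA_PERF_CLR_0_EN | RGX_CR_TA_PERF_CLR_1_EN |
+          RGX_CR_TA_PERF_CLR_2_EN | RGX_CR_TA_PERF_CLR_3_EN,
+          pvRegs + RGX_CR_TA_PERF);
+   writel(RGX_CR_TA_PERF_CTRL_ENABLE_EN, pvRegs + RGX_CR_TA_PERF);
+*/
+
+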
+/*
+ Register RGX_CR_TA_PERF_SELECT0
+*/
+#define RGX_CR_TA_PERF_SELECT0 (0x7608U)
+#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT1
+*/
+#define RGX_CR_TA_PERF_SELECT1 (0x7610U)
+#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT2
+*/
+#define RGX_CR_TA_PERF_SELECT2 (0x7618U)
+#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT3
+*/
+#define RGX_CR_TA_PERF_SELECT3 (0x7620U)
+#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
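+/*
+ Usage sketch (illustrative only): the four TA_PERF_SELECT registers above
+ share one layout, so a select value can be composed from the SELECT0 field
+ defines. The group/bit/batch numbers below are arbitrary placeholders used
+ purely to show how the _SHIFT values combine into the 64-bit word.
+
+   IMG_UINT64 ui64Select =
+       ((IMG_UINT64)0x1    << RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+       ((IMG_UINT64)0x0001 << RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT)   |
+       ((IMG_UINT64)0x0000 << RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT)    |
+       ((IMG_UINT64)0x3FFF << RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT);
+*/
+
+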
+/*
+ Register RGX_CR_TA_PERF_SELECTED_BITS
+*/
+#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_0
+*/
+#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U)
+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_1
+*/
+#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U)
+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_2
+*/
+#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U)
+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_3
+*/
+#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U)
+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF
+*/
+#define RGX_CR_RASTERISATION_PERF (0x7700U)
+#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_SELECT0
+*/
+#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_COUNTER_0
+*/
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF
+*/
+#define RGX_CR_TPU_MCU_L0_PERF (0x7900U)
+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_PERF
+*/
+#define RGX_CR_USC_PERF (0x8100U)
+#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_USC_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_USC_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_USC_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_USC_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_USC_PERF_SELECT0
+*/
+#define RGX_CR_USC_PERF_SELECT0 (0x8108U)
+#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_USC_PERF_COUNTER_0
+*/
+#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U)
+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF))
+#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN (0X00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0X00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN (0X00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN (0X00000800U)
+#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U)
+#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_TLA_EN (0X00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN (0X00000200U)
+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U)
+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_JONES_IDLE_HOSTIF_EN (0X00000100U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN (0X00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN (0X00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN (0X00000020U)
+#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U)
+#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USCS_EN (0X00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN (0X00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN (0X00000004U)
+#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U)
+#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_VDM_EN (0X00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN (0X00000001U)
+
+
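+/*
+ Usage sketch (illustrative only): RGX_CR_JONES_IDLE exposes one bit per
+ unit, so, assuming a set bit indicates the corresponding unit is idle,
+ "everything idle" can be tested by comparing the register value against
+ the MASKFULL constant. pvRegs is an assumed __iomem mapping of the
+ register bank; this header only supplies the offsets and masks.
+
+   if ((readl(pvRegs + RGX_CR_JONES_IDLE) & RGX_CR_JONES_IDLE_MASKFULL) ==
+       RGX_CR_JONES_IDLE_MASKFULL)
+   {
+       // all Jones units (BIF, VDM, CDM, PM, ... TDM) currently report idle
+   }
+*/
+
+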
+/*
+ Register RGX_CR_TORNADO_PERF
+*/
+#define RGX_CR_TORNADO_PERF (0x8228U)
+#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TORNADO_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TORNADO_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TORNADO_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TORNADO_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TORNADO_PERF_SELECT0
+*/
+#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TORNADO_PERF_COUNTER_0
+*/
+#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF
+*/
+#define RGX_CR_TEXAS_PERF (0x8290U)
+#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U)
+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_TEXAS_PERF_CLR_5_EN (0X00000040U)
+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U)
+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_TEXAS_PERF_CLR_4_EN (0X00000020U)
+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TEXAS_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TEXAS_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_SELECT0
+*/
+#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_COUNTER_0
+*/
+#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_JONES_PERF
+*/
+#define RGX_CR_JONES_PERF (0x8330U)
+#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_JONES_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_JONES_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_JONES_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_JONES_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_JONES_PERF_SELECT0
+*/
+#define RGX_CR_JONES_PERF_SELECT0 (0x8338U)
+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_JONES_PERF_COUNTER_0
+*/
+#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U)
+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF
+*/
+#define RGX_CR_BLACKPEARL_PERF (0x8400U)
+#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0X00000040U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0X00000020U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_SELECT0
+*/
+#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
+*/
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PBE_PERF
+*/
+#define RGX_CR_PBE_PERF (0x8478U)
+#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_PBE_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_PBE_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_PBE_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_PBE_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_PBE_PERF_SELECT0
+*/
+#define RGX_CR_PBE_PERF_SELECT0 (0x8480U)
+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_PBE_PERF_COUNTER_0
+*/
+#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0XFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0XFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0XFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0XFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0XFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0X00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0X00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0X00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0X00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0X00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0X00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0X00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU)
+
+
+/*
+ Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0X00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0X00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0X00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0X00020000U)
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN (0X00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0XFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0X00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0X00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0X00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0X00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0X00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0X00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0X00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0X00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_FBA_FC0_CHECKSUM
+*/
+#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U)
+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC1_CHECKSUM
+*/
+#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U)
+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC2_CHECKSUM
+*/
+#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U)
+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC3_CHECKSUM
+*/
+#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U)
+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2 (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+ Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2 (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015))
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_RPM_SHF_FPL
+*/
+#define RGX_CR_RPM_SHF_FPL (0xD520U)
+#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U)
+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_RPM_SHF_FPL_READ
+*/
+#define RGX_CR_RPM_SHF_FPL_READ (0xD528U)
+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHF_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL
+*/
+#define RGX_CR_RPM_SHG_FPL (0xD538U)
+#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U)
+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL_READ
+*/
+#define RGX_CR_RPM_SHG_FPL_READ (0xD540U)
+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_SH_PERF
+*/
+#define RGX_CR_SH_PERF (0xD5F8U)
+#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SH_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SH_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SH_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SH_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SH_PERF_SELECT0
+*/
+#define RGX_CR_SH_PERF_SELECT0 (0xD600U)
+#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_SH_PERF_COUNTER_0
+*/
+#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U)
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SHF_SHG_CHECKSUM
+*/
+#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SHF_VARY_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_RPM_BIF_CHECKSUM
+*/
+#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U)
+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SHG_BIF_CHECKSUM
+*/
+#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SHG_FE_BE_CHECKSUM
+*/
+#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register DPX_CR_BF_PERF
+*/
+#define DPX_CR_BF_PERF (0xC458U)
+#define DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BF_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BF_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BF_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BF_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BF_PERF_SELECT0
+*/
+#define DPX_CR_BF_PERF_SELECT0 (0xC460U)
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BF_PERF_COUNTER_0
+*/
+#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U)
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register DPX_CR_BT_PERF
+*/
+#define DPX_CR_BT_PERF (0xC3D0U)
+#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BT_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BT_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BT_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BT_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BT_PERF_SELECT0
+*/
+#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U)
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BT_PERF_COUNTER_0
+*/
+#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U)
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register DPX_CR_RQ_USC_DEBUG
+*/
+#define DPX_CR_RQ_USC_DEBUG (0xC110U)
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U)
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register DPX_CR_BIF_MMU_STATUS
+*/
+#define DPX_CR_BIF_MMU_STATUS (0xC5D8U)
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7))
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U)
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0XF00FFFFFU)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0XFFF00FFFU)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0XFFFFF00FU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0X00000004U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0X00000002U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_RT_PERF
+*/
+#define DPX_CR_RT_PERF (0xC700U)
+#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_RT_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_RT_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_RT_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_RT_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_RT_PERF_SELECT0
+*/
+#define DPX_CR_RT_PERF_SELECT0 (0xC708U)
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_RT_PERF_COUNTER_0
+*/
+#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U)
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register DPX_CR_BX_TU_PERF
+*/
+#define DPX_CR_BX_TU_PERF (0xC908U)
+#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BX_TU_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BX_TU_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BX_TU_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BX_TU_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BX_TU_PERF_SELECT0
+*/
+#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BX_TU_PERF_COUNTER_0
+*/
+#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register DPX_CR_RS_PDS_RR_CHECKSUM
+*/
+#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0XF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U)
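+
+/* Illustrative sketch, not part of the generated register header: the _SHIFT,
+ * _CLRMSK and _ALIGNSHIFT constants above are typically combined in a
+ * read-modify-write of the register value. The helper below is hypothetical
+ * and kept inside #if 0; it only demonstrates how a 4KB-aligned catalogue
+ * base address would be encoded into the BASE_ADDR field.
+ */
+#if 0
+static inline IMG_UINT32 ExampleEncodeCBaseMapping(IMG_UINT32 ui32RegVal,
+                                                   IMG_UINT64 ui64PhysAddr)
+{
+	/* Drop the 12 alignment bits implied by _ALIGNSHIFT/_ALIGNSIZE. */
+	IMG_UINT32 ui32Field =
+		(IMG_UINT32)(ui64PhysAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
+
+	/* Clear the field with _CLRMSK, then OR in the shifted value. */
+	ui32RegVal &= RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK;
+	ui32RegVal |= (ui32Field << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) &
+	              ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK;
+	return ui32RegVal;
+}
+#endif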
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107))
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0X00000100U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0XFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0X00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0X00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0X00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0X00000004U)
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_STATUS
+*/
+#define RGX_CR_SLC3_STATUS (0xE220U)
+#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U)
+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U)
+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U)
+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U)
+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_SLC3_IDLE
+*/
+#define RGX_CR_SLC3_IDLE (0xE228U)
+#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0XFFF3FFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U)
+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_EN (0X00020000U)
+#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U)
+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_SLC3_IDLE_RDI_EN (0X00010000U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0XFFFFF00FU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0X00000002U)
+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U)
+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC3_IDLE_XBAR_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC3_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0XFFFFE000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_MODE
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0XFFFFFFFCU)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (00000000U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0X00000001U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0X00000002U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING0
+*/
+#define RGX_CR_CONTEXT_MAPPING0 (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0X00FFFFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING1
+*/
+#define RGX_CR_CONTEXT_MAPPING1 (0xF080U)
+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING2
+*/
+#define RGX_CR_CONTEXT_MAPPING2 (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING3
+*/
+#define RGX_CR_CONTEXT_MAPPING3 (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_JONES_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_DUST_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4 (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+#endif /* _RGX_CR_DEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@Title Rogue hw definitions (kernel mode)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXDEFS_KM_H_
+#define _RGXDEFS_KM_H_
+
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+
+#define __IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#else
+#include RGX_BVNC_CORE_HEADER
+#include RGX_BNC_CONFIG_HEADER
+#include "rgx_cr_defs.h"
+#endif
+#undef __IMG_EXPLICIT_INCLUDE_HWDEFS
+
+/* The following macros are picked up through the BVNC headers for PDUMP and
+ * no-hardware builds, to remain compatible with the old build infrastructure.
+ */
+#if defined(PDUMP) || defined(NO_HARDWARE) || !defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+#endif
+
+#if defined(PDUMP) || defined(NO_HARDWARE)
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define _RGX_BVNC_ST2(S) #S
+#define _RGX_BVNC_ST(S) _RGX_BVNC_ST2(S)
+#if defined(PDUMP) || defined(NO_HARDWARE) || defined(PVRSRV_GPUVIRT_GUESTDRV) || !defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+#define RGX_BVNC_KM _RGX_BVNC_ST(RGX_BVNC_KM_B) "." _RGX_BVNC_ST(RGX_BVNC_KM_V) "." _RGX_BVNC_ST(RGX_BVNC_KM_N) "." _RGX_BVNC_ST(RGX_BVNC_KM_C)
+#endif
+#define RGX_BVNC_KM_V_ST _RGX_BVNC_ST(RGX_BVNC_KM_V)
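+
+/* Illustrative note, not part of the original header: the two-level
+ * _RGX_BVNC_ST()/_RGX_BVNC_ST2() pair forces the BVNC components to be
+ * macro-expanded before stringification. For hypothetical values
+ * RGX_BVNC_KM_B=4, _V=43, _N=6 and _C=62, RGX_BVNC_KM therefore expands to
+ * the adjacent string literals "4" "." "43" "." "6" "." "62", i.e. "4.43.6.62".
+ */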
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1)
+#define C_POSITION (0)
+#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION)))
+#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION))
+#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION))
+#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION))
+#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION))
+
+#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)B)) << (B_POSITION) | \
+ (((IMG_UINT64)V)) << (V_POSITION) | \
+ (((IMG_UINT64)N)) << (N_POSITION) | \
+ (((IMG_UINT64)C)) << (C_POSITION) \
+ )
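+
+/* Illustrative sketch, not part of the original header: BVNC_PACK() places
+ * each B.V.N.C component in its own BVNC_FIELD_WIDTH-wide field of a 64-bit
+ * value and GET_B/V/N/C() recover them. The values below are hypothetical and
+ * the function is kept inside #if 0; it only demonstrates the round trip.
+ */
+#if 0
+static inline void ExampleBVNCPackRoundTrip(void)
+{
+	IMG_UINT64 ui64BVNC = BVNC_PACK(4, 43, 6, 62);
+
+	/* Provided each component fits in BVNC_FIELD_WIDTH bits:
+	 *   GET_B(ui64BVNC) == 4,  GET_V(ui64BVNC) == 43,
+	 *   GET_N(ui64BVNC) == 6,  GET_C(ui64BVNC) == 62
+	 */
+	(void)ui64BVNC;
+}
+#endif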
+
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U)
+
+/* META cores (required for the RGX_FEATURE_META) */
+#define MTP218 (1)
+#define MTP219 (2)
+#define LTP218 (3)
+#define LTP217 (4)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K (32*1024)
+#define RGX_META_COREMEM_48K (48*1024)
+#define RGX_META_COREMEM_64K (64*1024)
+#define RGX_META_COREMEM_128K (128*1024)
+#define RGX_META_COREMEM_256K (256*1024)
+
+#if !defined(__KERNEL__)
+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && (RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024)
+#define RGX_META_COREMEM (1)
+#define RGX_META_COREMEM_CODE (1)
+#if !defined(FIX_HW_BRN_50767)
+#define RGX_META_COREMEM_DATA (1)
+#endif
+#else
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#undef RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE (0)
+#endif
+#endif
+
+/* ISP requires valid state on all three pipes regardless of the number of
+ * active pipes/tiles in flight.
+ */
+#define RGX_MAX_NUM_PIPES 3
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((x)/8)
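+/* Illustrative note, not part of the original header: the divide-by-8 above
+ * appears to convert a cache line size given in bits (as supplied by the BVNC
+ * feature headers) into bytes.
+ */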
+
+
+#define MAX_HW_TA3DCONTEXTS 2
+
+
+/* Useful extra defines for clock ctrl */
+#define RGX_CR_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL)
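+
+/* Illustrative note, not part of the original header: each clock domain in the
+ * CLK_CTRL registers occupies a 2-bit field encoded as OFF=0b00, ON=0b01 and
+ * AUTO=0b10 (see the _OFF/_ON/_AUTO values of RGX_CR_CLK_CTRL2 above).
+ * Replicating 0b01 across the 64-bit register gives 0x5555555555555555 (every
+ * domain ON) and replicating 0b10 gives 0xAAAAAAAAAAAAAAAA (every domain
+ * AUTO); ANDing with RGX_CR_CLK_CTRL_MASKFULL keeps only the fields that
+ * exist on a given core.
+ */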
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \
+ RGX_CR_SOFT_RESET_VDM_EN | \
+ RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES | \
+ RGX_CR_SOFT_RESET_BIF_EN | \
+ RGX_CR_SOFT_RESET_SLC_EN | \
+ RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+ RGX_CR_SOFT_RESET2_PIXEL_EN | \
+ RGX_CR_SOFT_RESET2_CDM_EN | \
+ RGX_CR_SOFT_RESET2_VERTEX_EN)
+
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1 << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1 << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+/* To get the number of required Dusts, divide the number of clusters by 2 and round up */
+#define RGX_REQ_NUM_DUSTS(CLUSTERS) ((CLUSTERS + 1) / 2)
+
+/* To get the number of required Bernados/Phantoms, divide the number of clusters by 4 and round up */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3) / 4)
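+
+/* Illustrative note, not part of the original header: the macros above are
+ * integer ceiling divisions. For example, with 6 clusters,
+ * RGX_REQ_NUM_DUSTS(6) == (6 + 1) / 2 == 3 Dusts and
+ * RGX_REQ_NUM_PHANTOMS(6) == (6 + 3) / 4 == 2 Phantoms.
+ */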
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__)
+ #define RGX_GET_NUM_PHANTOMS(x) (RGX_REQ_NUM_PHANTOMS(x))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+ #define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#else
+ #define RGX_NUM_PHANTOMS (1)
+#endif
+#else
+ #if defined(RGX_FEATURE_CLUSTER_GROUPING)
+ #define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+ #else
+ #define RGX_NUM_PHANTOMS (1)
+ #endif
+ #define RGX_GET_NUM_PHANTOMS(x) (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+
+/* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT is not defined for format 1 cores (so define it now). */
+#if !defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1)
+#endif
+
+/* META second thread feature, depending on the META variant and the available CoreMem */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+/*
+ Start at 903 GiB, with 32 MB per OSID (see rgxheapconfig.h).
+ NOTE:
+ The firmware heap base and size is defined here to
+ simplify #include dependencies, see rgxheapconfig.h
+ for the full RGX virtual address space layout.
+*/
+#define RGX_FIRMWARE_HEAP_BASE IMG_UINT64_C(0xE1C0000000)
+#define RGX_FIRMWARE_HEAP_SIZE (1<<RGX_FW_HEAP_SHIFT)
+#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT
+
+/* Default number of OSIDs is 1 unless GPU Virtualization is supported and enabled */
+#if defined(SUPPORT_PVRSRV_GPUVIRT) && !defined(PVRSRV_GPUVIRT_GUESTDRV) && (PVRSRV_GPUVIRT_NUM_OSID + 1 > 1)
+#define RGXFW_NUM_OS PVRSRV_GPUVIRT_NUM_OSID
+#else
+#define RGXFW_NUM_OS 1
+#endif
+
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+#if defined(FIX_HW_BRN_36492)
+
+#undef RGX_CR_SOFT_RESET_SLC_EN
+#undef RGX_CR_SOFT_RESET_SLC_CLRMSK
+#undef RGX_CR_SOFT_RESET_SLC_SHIFT
+
+/* Remove the SOFT_RESET_SLC_EN bit from SOFT_RESET_MASKFULL */
+#undef RGX_CR_SOFT_RESET_MASKFULL
+#define RGX_CR_SOFT_RESET_MASKFULL IMG_UINT64_C(0x000001FFF7FFFC1D)
+
+#endif /* FIX_HW_BRN_36492 */
+
+
+#if defined(RGX_CR_JONES_IDLE_MASKFULL)
+/* Workaround for HW BRN 57289 */
+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF)
+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!!
+#endif
+#undef RGX_CR_JONES_IDLE_MASKFULL
+#undef RGX_CR_JONES_IDLE_TDM_SHIFT
+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK
+#undef RGX_CR_JONES_IDLE_TDM_EN
+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#endif
+
+
+#define DPX_MAX_RAY_CONTEXTS 4 /* FIXME should this be in dpx file? */
+#define DPX_MAX_FBA_AP 16
+#define DPX_MAX_FBA_FILTER_WIDTH 24
+
+#if !defined(__KERNEL__)
+#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES)
+#if defined(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024)
+#else
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0)
+#endif
+#endif
+#endif
+
+
+#endif /* _RGXDEFS_KM_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxmmudefs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ * Generated by regconv version MAIN@3328745
+ * from files:
+ * rogue_bif.def
+ * rogue_bif.def
+ */
+
+
+#ifndef _RGXMMUDEFS_KM_H_
+#define _RGXMMUDEFS_KM_H_
+
+#include "img_types.h"
+
+/*
+
+ Encoding of DM (note value 0x6 not used)
+
+*/
+#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+
+/*
+
+ Labelling of fields within virtual address
+
+*/
+/*
+Page Catalogue entry #
+*/
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+/*
+Page Directory entry #
+*/
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC01FFFFF))
+/*
+Page Table entry #
+*/
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE00FFF))
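+
+/*
+ Illustrative sketch (hypothetical variable names, not part of the register
+ definitions): each CLRMSK clears its field, so an index is extracted by
+ masking with the inverted CLRMSK and shifting down, e.g. for the Page
+ Catalogue index:
+
+ ui32PCIndex = (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK)
+ >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);
+
+ The PD and PT indices follow the same pattern with their own SHIFT/CLRMSK pairs.
+*/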
+
+
+/*
+
+ Number of entries in a PC
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+
+
+/*
+
+ Number of entries in a PD
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+
+
+/*
+
+ Number of entries in a PT
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+
+/*
+
+ Size in bits of the PC entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+
+
+/*
+
+ Size in bits of the PD entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+
+
+/*
+
+ Size in bits of the PT entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
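+
+/*
+ Illustrative arithmetic: with 4KB pages the hierarchy above covers
+ 0x400 PC entries x 0x200 PD entries x 0x200 PT entries x 4KB pages
+ = 2^10 * 2^9 * 2^9 * 2^12 = 2^40 bytes,
+ which matches the 40-bit virtual address split (10/9/9/12) defined earlier.
+*/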
+
+
+/*
+
+ Encoding of page size field
+
+*/
+#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+
+/*
+
+ Range of bits used for 4KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+ Range of bits used for 16KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000003FFF))
+
+
+/*
+
+ Range of bits used for 64KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000FFFF))
+
+
+/*
+
+ Range of bits used for 256KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000003FFFF))
+
+
+/*
+
+ Range of bits used for 1MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000FFFFF))
+
+
+/*
+
+ Range of bits used for 2MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00001FFFFF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 4KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 16KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000003FF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 64KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 256KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000003F))
+
+
+/*
+
+ Range of bits used for PT Base Address for 1MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+ Range of bits used for PT Base Address for 2MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+ Format of Page Table data
+
+*/
+/*
+PM/Meta protect bit
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0X4000000000000000))
+/*
+Upper part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+/*
+Physical page address
+*/
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+/*
+Lower part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0X0000000000000020))
+/*
+PM Src
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0X0000000000000010))
+/*
+SLC Bypass Ctrl
+*/
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0X0000000000000008))
+/*
+Cache Coherency bit
+*/
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0X0000000000000004))
+/*
+Read only
+*/
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0X0000000000000002))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0X0000000000000001))
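+
+/*
+ Illustrative sketch (hypothetical variable names, not part of the register
+ definitions): a minimal valid page table entry for a 4KB-aligned physical
+ page address that fits within the 40-bit field could be assembled as
+
+ ui64PTE = (ui64PhysAddr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)
+ | RGX_MMUCTRL_PT_DATA_VALID_EN;
+
+ with the remaining flag bits (read-only, cache coherency, PM/Meta protect,
+ etc.) OR'ed in as required.
+*/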
+
+
+/*
+
+ Format of Page Directory data
+
+*/
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0X0000010000000000))
+/*
+Page Table base address
+*/
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+/*
+Page Size
+*/
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+
+ Format of Page Catalogue data
+
+*/
+/*
+Page Catalogue base address
+*/
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0X0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0XFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0X00000002U)
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0XFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN (0X00000001U)
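+
+/*
+ Illustrative sketch (hypothetical variable names): a page catalogue entry
+ stores the 4KB-aligned Page Directory base address right-shifted by the
+ ALIGNSHIFT, e.g.
+
+ ui32PCE = ((IMG_UINT32)(ui64PDPhysAddr >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT)
+ << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT)
+ | RGX_MMUCTRL_PC_DATA_VALID_EN;
+*/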
+
+
+#endif /* _RGXMMUDEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Global 3D types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines 3D types for use by IMG APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_3DTYPES_H__
+#define __IMG_3DTYPES_H__
+
+#include <powervr/buffer_attribs.h>
+
+/**
+ * Comparison functions
+ * A comparison is evaluated as:
+ * A {CmpFunc} B
+ * where A is the reference value, e.g. the incoming depth, and
+ * B is the sample value, e.g. the value already in the depth buffer.
+ */
+typedef enum _IMG_COMPFUNC_
+{
+ IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */
+ IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */
+ IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */
+ IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to
+ operation */
+ IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation
+ */
+ IMG_COMPFUNC_NOT_EQUAL, /**< The comparison is a not-equal-to operation
+ */
+ IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or
+ equal-to operation */
+ IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */
+} IMG_COMPFUNC;
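+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the IMG APIs): the
+ * enum names the operator placed between the reference value A and the
+ * sample value B, i.e. a software evaluation would look like:
+ *
+ * static IMG_BOOL CompFuncPasses(IMG_COMPFUNC eFunc, IMG_FLOAT fA, IMG_FLOAT fB)
+ * {
+ * switch (eFunc)
+ * {
+ * case IMG_COMPFUNC_NEVER: return IMG_FALSE;
+ * case IMG_COMPFUNC_LESS: return (fA < fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_EQUAL: return (fA == fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_LESS_EQUAL: return (fA <= fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_GREATER: return (fA > fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_NOT_EQUAL: return (fA != fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_GREATER_EQUAL: return (fA >= fB) ? IMG_TRUE : IMG_FALSE;
+ * case IMG_COMPFUNC_ALWAYS:
+ * default: return IMG_TRUE;
+ * }
+ * }
+ */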
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+ IMG_STENCILOP_KEEP, /**< Keep original value */
+ IMG_STENCILOP_ZERO, /**< Set stencil to 0 */
+ IMG_STENCILOP_REPLACE, /**< Replace stencil entry */
+ IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
+ IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
+ IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */
+ IMG_STENCILOP_INCR, /**< Increment stencil entry,
+ wrapping if necessary */
+ IMG_STENCILOP_DECR, /**< Decrement stencil entry,
+ wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+ IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */
+ IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */
+ IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */
+ IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour
+ (i.e. 1-src_col) */
+ IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */
+ IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha
+ (i.e. 1-src_alpha) */
+ IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */
+ IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination
+ alpha */
+ IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */
+ IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination
+ colour */
+ IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the
+ minimum of (Src alpha,
+ 1 - destination alpha)) */
+ IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */
+ IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+ IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from
+ the pixel shader */
+ IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour
+ outputted from the pixel shader */
+ IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from
+ the pixel shader */
+ IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha
+ outputted from the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+ IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */
+ IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */
+ IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+ IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */
+ IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+ IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */
+ IMG_LOGICOP_SET, /**< Result = -1 */
+ IMG_LOGICOP_COPY, /**< Result = Source */
+ IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+ IMG_LOGICOP_NOOP, /**< Result = Destination */
+ IMG_LOGICOP_INVERT, /**< Result = ~Destination */
+ IMG_LOGICOP_AND, /**< Result = Source & Destination */
+ IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */
+ IMG_LOGICOP_OR, /**< Result = Source | Destination */
+ IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */
+ IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */
+ IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */
+ IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */
+ IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */
+ IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */
+ IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+ IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+ * based on the value output from the vertex phase */
+ IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+ IMG_FOGMODE_EXP, /**< Exponential */
+ IMG_FOGMODE_EXP2, /**< Exponential squaring */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+ IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */
+ IMG_FILTER_POINT, /**< Point filtering */
+ IMG_FILTER_LINEAR, /**< Bi-linear filtering */
+ IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+ IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */
+ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+ IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */
+ IMG_ADDRESSMODE_CLAMPBORDER,
+ IMG_ADDRESSMODE_OGL_CLAMP,
+ IMG_ADDRESSMODE_OVG_TILEFILL,
+ IMG_ADDRESSMODE_DONTCARE,
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+ IMG_CULLMODE_NONE, /**< Don't cull */
+ IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */
+ IMG_CULLMODE_BACKFACING, /**< Back facing triangles */
+} IMG_CULLMODE;
+
+
+/*! ************************************************************************//**
+@brief Specifies the MSAA resolve operation.
+*/ /**************************************************************************/
+typedef enum _IMG_RESOLVE_OP_
+{
+ IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */
+ IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */
+ IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */
+ IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */
+ IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */
+ IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */
+ IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */
+ IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */
+ IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */
+ IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */
+ IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */
+} IMG_RESOLVE_OP;
+
+
+#endif /* __IMG_3DTYPES_H__ */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common header containing type definitions for portability
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Contains variable and structure definitions. Any platform
+ specific types should be defined in this file.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__IMG_DEFS_H__)
+#define __IMG_DEFS_H__
+
+#include <stddef.h>
+
+#include "img_types.h"
+
+#if defined (NO_INLINE_FUNCS)
+ #define INLINE
+ #define FORCE_INLINE
+#elif defined(INTEGRITY_OS)
+ #ifndef INLINE
+ #define INLINE
+ #endif
+ #define FORCE_INLINE static
+ #define INLINE_IS_PRAGMA
+#else
+#if defined (__cplusplus)
+ #define INLINE inline
+ #define FORCE_INLINE static inline
+#else
+#if !defined(INLINE)
+ #define INLINE __inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+ #define FORCE_INLINE __forceinline
+#else
+ #define FORCE_INLINE static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#define GCC_VERSION_AT_LEAST(major, minor) \
+ (defined(__GNUC__) && ( \
+ __GNUC__ > (major) || \
+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+
+/* Ensure Clang's __has_extension macro is defined for all compilers so we
+ * can use it safely in preprocessor conditionals.
+ */
+#if !defined(__has_extension)
+#define __has_extension(e) 0
+#endif
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/* static_assert(condition, "message to print if it fails");
+ *
+ * Assert something at compile time. If the assertion fails, try to print
+ * the message, otherwise do nothing. static_assert is available if:
+ *
+ * - It's already defined as a macro (e.g. by <assert.h> in C11)
+ * - We're using MSVC which exposes static_assert unconditionally
+ * - We're using a C++ compiler that supports C++11
+ * - We're using GCC 4.6 and up in C mode (in which case it's available as
+ * _Static_assert)
+ *
+ * In all other cases, fall back to an equivalent that makes an invalid
+ * declaration.
+ */
+#if !defined(static_assert) && !defined(_MSC_VER) && \
+ (!defined(__cplusplus) || __cplusplus < 201103L)
+ /* static_assert isn't already available */
+ #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \
+ (defined(__clang__) && __has_extension(c_static_assert)))
+ #define static_assert _Static_assert
+ #else
+ #define static_assert(expr, message) \
+ extern int _static_assert_failed[2*!!(expr) - 1] __attribute__((unused))
+ #endif
+#else
+#if defined(CONFIG_L4)
+ /* Defined but not compatible with DDK usage
+ so undefine & ignore */
+ #undef static_assert
+ #define static_assert(expr, message)
+#endif
+#endif
+
+/*! Macro to round the supplied value _x up to the next multiple of _n,
+ * where _n must be a power of two.
+ *
+ * Both arguments should be of types with the same width, otherwise the result
+ * may be truncated, e.g. a 64-bit address in _x combined with a 32-bit _n.
+ */
+#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1)) & ~((_n)-1))
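+/* For example, PVR_ALIGN(0x1001, 0x1000) == 0x2000 and PVR_ALIGN(13, 8) == 16,
+ * while already-aligned values are returned unchanged.
+ */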
+
+#if defined(_WIN32)
+
+#if defined(WINDOWS_WDF)
+
+ /*
+ * For WINDOWS_WDF drivers we do not want these defines to override the calling
+ * conventions propagated through the build system, so they are left empty to
+ * avoid calling-convention conflicts.
+ *
+ */
+ #define IMG_CALLCONV
+ #define C_CALLCONV
+
+ #define IMG_INTERNAL
+ #define IMG_RESTRICT __restrict
+
+ /*
+ * DLL linking under MS compilers involves two decorations:
+ * - the implementation is decorated with __declspec(dllexport), which lets
+ * the compiler produce the so-called 'export library';
+ * - forward declarations (in sources that depend on the DLL) are decorated with
+ * __declspec(dllimport), which lets the compiler generate faster and smaller
+ * code for calls to DLL-imported functions.
+ *
+ * Usually a single macro expands to the appropriate __declspec() depending on
+ * the translation unit: dllexport inside the DLL source and dllimport outside
+ * it. Having IMG_EXPORT and IMG_IMPORT resolve to the same __declspec() defeats
+ * that purpose, but at least it works.
+ */
+ #define IMG_IMPORT __declspec(dllexport)
+ #define IMG_EXPORT __declspec(dllexport)
+
+#else
+
+ #define IMG_CALLCONV __stdcall
+ #define IMG_INTERNAL
+ #define IMG_EXPORT __declspec(dllexport)
+ #define IMG_RESTRICT __restrict
+ #define C_CALLCONV __cdecl
+
+ /*
+ * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations match.
+ * Some compilers require the header to be declared IMPORT, while the implementation is declared EXPORT.
+ */
+ #define IMG_IMPORT IMG_EXPORT
+
+#endif
+
+#if defined(UNDER_WDDM)
+ #ifndef _INC_STDLIB
+ #if defined(__mips)
+ /* do nothing */
+ #elif defined(UNDER_MSBUILD)
+ _CRTIMP __declspec(noreturn) void __cdecl abort(void);
+ #else
+ _CRTIMP void __cdecl abort(void);
+ #endif
+ #endif
+#endif /* UNDER_WDDM */
+#else
+ #if defined(LINUX) || defined(__METAG) || defined(__QNXNTO__)
+
+ #define IMG_CALLCONV
+ #define C_CALLCONV
+ #if defined(__linux__) || defined(__QNXNTO__)
+ #define IMG_INTERNAL __attribute__((visibility("hidden")))
+ #else
+ #define IMG_INTERNAL
+ #endif
+ #define IMG_EXPORT __attribute__((visibility("default")))
+ #define IMG_IMPORT
+ #define IMG_RESTRICT __restrict__
+
+ #elif defined(INTEGRITY_OS)
+ #define IMG_CALLCONV
+ #define IMG_INTERNAL
+ #define IMG_EXPORT
+ #define IMG_RESTRICT
+ #define C_CALLCONV
+ #define __cdecl
+ /* IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations match.
+ * Some compilers require the header to be declared IMPORT, while the implementation is declared EXPORT.
+ */
+ #define IMG_IMPORT IMG_EXPORT
+ #ifndef USE_CODE
+ #define IMG_ABORT() printf("IMG_ABORT was called.\n")
+
+ #endif
+ #else
+ #error("define an OS")
+ #endif
+#endif
+
+// Use default definition if not overridden
+#ifndef IMG_ABORT
+ #if defined(EXIT_ON_ABORT)
+ #define IMG_ABORT() exit(1)
+ #else
+ #define IMG_ABORT() abort()
+ #endif
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute. Place the __maybe_unused between the type and name of an
+ * unused parameter in a function parameter list, e.g. `int __maybe_unused var'. This
+ * should only be used in GCC build environments, for example, in files that
+ * compile only on Linux. Other files should use PVR_UNREFERENCED_PARAMETER */
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(LINUX) && defined(__KERNEL__)
+ #include <linux/compiler.h>
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+ #define __must_check __attribute__((warn_unused_result))
+ #define __maybe_unused __attribute__((unused))
+ #define __malloc __attribute__((malloc))
+
+ /* Bionic's <sys/cdefs.h> might have defined these already */
+ #if !defined(__packed)
+ #define __packed __attribute__((packed))
+ #endif
+ #if !defined(__aligned)
+ #define __aligned(n) __attribute__((aligned(n)))
+ #endif
+
+ /* That one compiler that supports attributes but doesn't support
+ * the printf attribute... */
+ #if defined(__GNUC__)
+ #define __printf(fmt, va) __attribute__((format(printf, fmt, va)))
+ #else
+ #define __printf(fmt, va)
+ #endif /* defined(__GNUC__) */
+
+#else
+ /* Silently ignore those attributes */
+ #define __printf(fmt, va)
+ #define __packed
+ #define __aligned(n)
+ #define __must_check
+ #define __maybe_unused
+ #define __malloc
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+ #define __param_nonnull(...) __attribute__((nonnull(__VA_ARGS__)))
+ #define __returns_nonnull __attribute__((returns_nonnull))
+#else
+ #define __param_nonnull(...)
+ #define __returns_nonnull
+#endif
+
+
+/* GCC builtins */
+#if defined(LINUX) && defined(__KERNEL__)
+ #include <linux/compiler.h>
+#elif defined(__GNUC__)
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+
+ /* Compiler memory barrier to prevent reordering */
+ #define barrier() __asm__ __volatile__("": : :"memory")
+#else
+ #define barrier() do { static_assert(0, "barrier() isn't supported by your compiler"); } while(0)
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+ #define likely(x) (x)
+#endif
+#ifndef unlikely
+ #define unlikely(x) (x)
+#endif
+
+
+#if defined(__noreturn)
+ /* Already defined by the Kernel */
+#elif defined(_MSC_VER) || defined(CC_ARM)
+ #define __noreturn __declspec(noreturn)
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+ #define __noreturn __attribute__((noreturn))
+#else
+ #define __noreturn
+#endif
+
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Get a structure's address from the address of one of its members */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+ (type *) ((IMG_UINT8 *) (ptr) - offsetof(type, member))
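+
+/* Illustrative usage (hypothetical type and pointer names): given
+ * struct foo { int a; int b; } and a pointer to the member b, the containing
+ * structure is recovered with IMG_CONTAINER_OF(&psFoo->b, struct foo, b).
+ */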
+
+/* The number of elements in a fixed-size array (IMG's equivalent of the ARRAY_SIZE macro) */
+#define IMG_ARR_NUM_ELEMS(ARR) \
+ (sizeof(ARR) / sizeof((ARR)[0]))
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+ isn't already provided by the compiler. */
+#if defined(_MSC_VER)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+ C(const C&); \
+ void operator=(const C&)
+#endif
+
+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG)
+ #include "/usr/include/valgrind/memcheck.h"
+
+ #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+ #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size)
+ #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size)
+#else
+ #if defined(_MSC_VER)
+ # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+ #else
+ # define PVR_MSC_SUPPRESS_4127
+ #endif
+
+ #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+ #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+ #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+#endif
+
+#define _STRINGIFY(x) # x
+#define IMG_STRINGIFY(x) _STRINGIFY(x)
+
+#if defined(INTEGRITY_OS)
+ /* Definitions not present in INTEGRITY. */
+ #define PATH_MAX 200
+#endif
+
+#if defined (__clang__) || defined (__GNUC__)
+ /* __SIZEOF_POINTER__ is defined already by these compilers */
+#elif defined (INTEGRITY_OS)
+ #if defined (__Ptr_Is_64)
+ #define __SIZEOF_POINTER__ 8
+ #else
+ #define __SIZEOF_POINTER__ 4
+ #endif
+#elif defined(_WIN32)
+ #define __SIZEOF_POINTER__ sizeof(char *)
+#else
+ #warning Unknown OS - using default method to determine whether CPU arch is 64-bit.
+ #define __SIZEOF_POINTER__ sizeof(char *)
+#endif
+
+/* RDI8567: clang/llvm load/store optimisations cause issues with device
+ * memory allocations. Some pointers are made 'volatile' to prevent
+ * this optimisations being applied to writes through that particular pointer.
+ */
+#if defined(__clang__) && defined(__aarch64__)
+#define NOLDSTOPT volatile
+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *'
+ * to keep it compatible with its existing uses
+ */
+#define NOLDSTOPT_VOID (void *)
+#else
+#define NOLDSTOPT
+#define NOLDSTOPT_VOID
+#endif
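+
+/* Illustrative usage (hypothetical names): declare the pointer as
+ * NOLDSTOPT IMG_UINT32 *pui32DevMem;
+ * and drop the qualifier with NOLDSTOPT_VOID where a plain void pointer is
+ * expected, e.g. memset(NOLDSTOPT_VOID pui32DevMem, 0, uiSize);
+ */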
+
+#endif /* #if !defined (__IMG_DEFS_H__) */
+/*****************************************************************************
+ End of file (IMG_DEFS.H)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Global types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines type aliases for use by IMG APIs.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ * and MSVC has its own built-in sized types. We can define the C99 types
+ * in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ * some other features (like macros for constants or printf format
+ * strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+ #include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+ #include <linux/types.h>
+ #include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+ #include <stddef.h> /* NULL */
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+ #include <limits.h> /* INT_MIN, etc */
+#elif defined(__mips)
+ #include <stddef.h> /* NULL */
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+#else
+ #error C99 support not set up for this build
+#endif
+
+typedef unsigned int IMG_UINT, *IMG_PUINT;
+typedef int IMG_INT, *IMG_PINT;
+
+typedef uint8_t IMG_UINT8, *IMG_PUINT8;
+typedef uint8_t IMG_BYTE, *IMG_PBYTE;
+typedef int8_t IMG_INT8, *IMG_PINT8;
+typedef char IMG_CHAR, *IMG_PCHAR;
+
+typedef uint16_t IMG_UINT16, *IMG_PUINT16;
+typedef int16_t IMG_INT16, *IMG_PINT16;
+typedef uint32_t IMG_UINT32, *IMG_PUINT32;
+typedef int32_t IMG_INT32, *IMG_PINT32;
+
+typedef uint64_t IMG_UINT64, *IMG_PUINT64;
+typedef int64_t IMG_INT64, *IMG_PINT64;
+#define IMG_INT64_C(c) INT64_C(c)
+#define IMG_UINT64_C(c) UINT64_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
+
+#define IMG_UINT16_MAX UINT16_MAX
+#define IMG_UINT32_MAX UINT32_MAX
+#define IMG_UINT64_MAX UINT64_MAX
+
+#define IMG_INT16_MAX INT16_MAX
+#define IMG_INT32_MAX INT32_MAX
+#define IMG_INT64_MAX INT64_MAX
+
+/* Linux kernel mode does not use floating point */
+typedef float IMG_FLOAT, *IMG_PFLOAT;
+typedef double IMG_DOUBLE, *IMG_PDOUBLE;
+
+typedef union _IMG_UINT32_FLOAT_
+{
+ IMG_UINT32 ui32;
+ IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int IMG_SECURE_TYPE;
+
+typedef enum tag_img_bool
+{
+ IMG_FALSE = 0,
+ IMG_TRUE = 1,
+ IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+typedef void IMG_VOID, *IMG_PVOID;
+
+typedef uintptr_t IMG_UINTPTR_T;
+typedef size_t IMG_SIZE_T;
+
+#define IMG_SIZE_T_MAX SIZE_MAX
+#define IMG_NULL NULL
+
+typedef IMG_CHAR const* IMG_PCCHAR;
+#endif
+
+#if defined(_MSC_VER)
+#define IMG_SIZE_FMTSPEC "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#define IMG_PFN_FMTSPEC "%pf"
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+typedef void *IMG_HANDLE;
+
+/* services/stream ID */
+typedef IMG_UINT64 IMG_SID;
+
+/* Process IDs */
+typedef IMG_UINT32 IMG_PID;
+
+/* OS connection type */
+typedef int IMG_OS_CONNECTION;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. an IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-CPU MMU,
+ * and a memory block is only mapped by the MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ *
+ */
+
+
+/*
+ *
+ * +------------+ +------------+ +------------+ +------------+
+ * | CPU | | DEV | | DEV | | DEV |
+ * +------------+ +------------+ +------------+ +------------+
+ * | | | |
+ * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR |
+ * | \-------------------/ |
+ * | | |
+ * +------------+ +------------+ |
+ * | MMU | | MMU | |
+ * +------------+ +------------+ |
+ * | | |
+ * | | |
+ * | | |
+ * +--------+ +---------+ +--------+
+ * | Offset | | (Offset)| | Offset |
+ * +--------+ +---------+ +--------+
+ * | | IMG_DEV_PHYADDR |
+ * | | |
+ * | | IMG_DEV_PHYADDR |
+ * +---------------------------------------------------------------------+
+ * | System Address bus |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct _IMG_CPU_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+ uintptr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var)
+#elif defined(LINUX) && defined(__KERNEL__)
+ phys_addr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var)
+#else
+ IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var)
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct _IMG_DEV_PHYADDR
+{
+ IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct _IMG_SYS_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+ uintptr_t uiAddr;
+#else
+ IMG_UINT64 uiAddr;
+#endif
+} IMG_SYS_PHYADDR;
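+
+/* Illustrative sketch (hypothetical variable names): because these address
+ * types are wrapped in structures, values are moved via the uiAddr member,
+ * e.g.
+ *
+ * IMG_CPU_PHYADDR sCpuPAddr;
+ * sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiRawAddr);
+ *
+ * and an accidental assignment between, say, IMG_CPU_PHYADDR and
+ * IMG_DEV_PHYADDR values will not compile.
+ */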
+
+/* 32-bit device virtual address (e.g. MSVDX) */
+typedef struct _IMG_DEV_VIRTADDR32
+{
+ IMG_UINT32 uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT32(var) (IMG_UINT32)(var)
+} IMG_DEV_VIRTADDR32;
+
+/*
+ rectangle structure
+*/
+typedef struct _IMG_RECT_
+{
+ IMG_INT32 x0;
+ IMG_INT32 y0;
+ IMG_INT32 x1;
+ IMG_INT32 y1;
+}IMG_RECT, *PIMG_RECT;
+
+typedef struct _IMG_RECT_16_
+{
+ IMG_INT16 x0;
+ IMG_INT16 y0;
+ IMG_INT16 x1;
+ IMG_INT16 y1;
+}IMG_RECT_16, *PIMG_RECT_16;
+
+typedef struct _IMG_RECT_32_
+{
+ IMG_FLOAT x0;
+ IMG_FLOAT y0;
+ IMG_FLOAT x1;
+ IMG_FLOAT y1;
+} IMG_RECT_F32, *PIMG_RECT_F32;
+
+/*
+ * box structure
+ */
+typedef struct _IMG_BOX_
+{
+ IMG_INT32 x0;
+ IMG_INT32 y0;
+ IMG_INT32 z0;
+ IMG_INT32 x1;
+ IMG_INT32 y1;
+ IMG_INT32 z1;
+} IMG_BOX, *PIMG_BOX;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#include "img_defs.h"
+
+#endif /* __IMG_TYPES_H__ */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/interrupt.h>
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+typedef struct LISR_DATA_TAG
+{
+ IMG_UINT32 ui32IRQ;
+ PFN_SYS_LISR pfnLISR;
+ void *pvData;
+} LISR_DATA;
+
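+/* Thin wrapper matching the Linux irq_handler_t signature: it forwards the
+ * interrupt to the registered PFN_SYS_LISR and reports IRQ_HANDLED only when
+ * that handler claims the interrupt, which matters on shared IRQ lines.
+ */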
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
+{
+ LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+ PVR_UNREFERENCED_PARAMETER(irq);
+
+ if (psLISRData)
+ {
+ if (psLISRData->pfnLISR(psLISRData->pvData))
+ {
+ return IRQ_HANDLED;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __FUNCTION__));
+ }
+
+ return IRQ_NONE;
+}
+
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszDevName,
+ PFN_SYS_LISR pfnLISR,
+ void *pvData,
+ IMG_UINT32 ui32Flags)
+{
+ LISR_DATA *psLISRData;
+ unsigned long ulIRQFlags = 0;
+
+ if (pfnLISR == NULL || pvData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+ {
+ case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+ break;
+ case SYS_IRQ_FLAG_TRIGGER_LOW:
+ ulIRQFlags |= IRQF_TRIGGER_LOW;
+ break;
+ case SYS_IRQ_FLAG_TRIGGER_HIGH:
+ ulIRQFlags |= IRQF_TRIGGER_HIGH;
+ break;
+ default:
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+ {
+ ulIRQFlags |= IRQF_SHARED;
+ }
+
+ psLISRData = OSAllocMem(sizeof *psLISRData);
+ if (psLISRData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psLISRData->ui32IRQ = ui32IRQ;
+ psLISRData->pfnLISR = pfnLISR;
+ psLISRData->pvData = pvData;
+
+ if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
+ {
+ OSFreeMem(psLISRData);
+
+ return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+ }
+
+ *phLISR = (IMG_HANDLE)psLISRData;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
+{
+ LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
+
+ if (psLISRData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ free_irq(psLISRData->ui32IRQ, psLISRData);
+
+ OSFreeMem(psLISRData);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INTERRUPT_SUPPORT_H__)
+#define __INTERRUPT_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device.h"
+
+#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \
+ SYS_IRQ_FLAG_TRIGGER_LOW | \
+ SYS_IRQ_FLAG_TRIGGER_HIGH)
+#define SYS_IRQ_FLAG_SHARED (0x1 << 8)
+
+#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \
+ SYS_IRQ_FLAG_SHARED)
+
+typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData);
+
+typedef struct _SYS_INTERRUPT_DATA_
+{
+ void *psSysData;
+ const IMG_CHAR *pszName;
+ PFN_SYS_LISR pfnLISR;
+ void *pvData;
+ IMG_UINT32 ui32InterruptFlag;
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ IMG_UINT32 ui32IRQ;
+#endif
+} SYS_INTERRUPT_DATA;
+
+/*************************************************************************/ /*!
+@Function OSInstallSystemLISR
+@Description Installs a system low-level interrupt handler
+@Output phLISR On return, contains a handle to the
+ installed LISR
+@Input ui32IRQ The IRQ number for which the
+ interrupt handler should be installed
+@Input pszDevName Name of the device for which the handler
+ is being installed
+@Input pfnLISR A pointer to an interrupt handler
+ function
+@Input pvData A pointer to data that should be passed
+ to pfnLISR when it is called
+@Input ui32Flags Interrupt flags
+@Return PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszDevName,
+ PFN_SYS_LISR pfnLISR,
+ void *pvData,
+ IMG_UINT32 ui32Flags);
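+
+/* Illustrative sketch (hypothetical names, not part of the API): a system
+ * layer would typically pair the calls as follows, where MyDeviceLISR is a
+ * PFN_SYS_LISR implementation and pvDeviceData is its private data:
+ *
+ * IMG_HANDLE hLISR;
+ * PVRSRV_ERROR eError;
+ *
+ * eError = OSInstallSystemLISR(&hLISR, ui32IRQ, "mydev", MyDeviceLISR,
+ * pvDeviceData, SYS_IRQ_FLAG_TRIGGER_DEFAULT);
+ * if (eError != PVRSRV_OK)
+ * ... handle the error ...
+ *
+ * ... at device teardown ...
+ * OSUninstallSystemLISR(hLISR);
+ */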
+
+/*************************************************************************/ /*!
+@Function OSUninstallSystemLISR
+@Description Uninstalls a system low-level interrupt handler
+@Input hLISRData The handle to the LISR to uninstall
+@Return PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData);
+#endif /* !defined(__INTERRUPT_SUPPORT_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma warning(disable:4201)
+#pragma warning(disable:4214)
+#pragma warning(disable:4115)
+#pragma warning(disable:4514)
+
+#include <ntddk.h>
+#include <windef.h>
+
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <asm/uaccess.h>
+#include "pvr_uaccess.h"
+#endif /* LINUX */
+
+#include "img_types.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "dbgdriv_ioctl.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma warning(default:4214)
+#pragma warning(default:4115)
+#endif /* _WIN32 */
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetServiceTable
+
+ PURPOSE : Returns a pointer to the kernel debug driver service table.
+
+ PARAMETERS : pvInBuffer - unused
+ pvOutBuffer - receives the service table pointer
+
+ RETURNS : IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ void **ppvOut;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+ ppvOut = (void **) pvOutBuffer;
+
+ *ppvOut = DBGDrivGetServiceTable();
+
+ return(IMG_TRUE);
+}
+
+#if defined(__QNXNTO__)
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivCreateStream
+
+ PURPOSE : Creates a debug driver stream with the requested name and
+ number of pages.
+
+ PARAMETERS : pvInBuffer - DBG_IN_CREATESTREAM parameters
+ pvOutBuffer - receives the init/main/deinit stream handles
+
+ RETURNS : Result of ExtDBGDrivCreateStream()
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivCreateStream(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_CREATESTREAM psIn;
+ PDBG_OUT_CREATESTREAM psOut;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+ psOut = (PDBG_OUT_CREATESTREAM) pvOutBuffer;
+
+ return (ExtDBGDrivCreateStream(psIn->u.pszName, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages, &psOut->phInit, &psOut->phMain, &psOut->phDeinit));
+}
+#endif
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetStream
+
+ PURPOSE : Looks up a debug stream by name and returns its SID.
+
+ PARAMETERS : pvInBuffer - DBG_IN_FINDSTREAM parameters
+ pvOutBuffer - receives the stream SID
+
+ RETURNS : IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetStream(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_FINDSTREAM psParams;
+ IMG_SID * phStream;
+
+ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
+ phStream = (IMG_SID *)pvOutBuffer;
+
+ /* Ensure that the name will be NULL terminated */
+ psParams->pszName[DEBUG_STREAM_NAME_MAX-1] = '\0';
+
+ *phStream = PStream2SID(ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream));
+
+ return(IMG_TRUE);
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivRead
+
+ PURPOSE : Reads data from a debug stream into a caller-supplied buffer.
+
+ PARAMETERS : pvInBuffer - DBG_IN_READ parameters
+ pvOutBuffer - receives the number of bytes copied
+
+ RETURNS : IMG_TRUE on success, IMG_FALSE for an invalid stream SID
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivRead(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+ IMG_UINT8 *pui8ReadBuffer;
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+ pui8ReadBuffer = WIDEPTR_GET_PTR(psInParams->pui8OutBuffer, bCompat);
+
+ psStream = SID2PStream(psInParams->hStream);
+
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psInParams->ui32BufID,
+ psInParams->ui32OutBufferSize,
+ pui8ReadBuffer);
+ return(IMG_TRUE);
+ }
+ else
+ {
+ /* invalid SID */
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivSetMarker
+
+ PURPOSE : Sets the marker in the stream to split output files
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivSetMarker(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_SETMARKER psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+ return(IMG_TRUE);
+ }
+ else
+ {
+ /* invalid SID */
+ return(IMG_FALSE);
+ }
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetMarker
+
+ PURPOSE : Gets the marker in the stream to split output files
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetMarker(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ *pui32Current = ExtDBGDrivGetMarker(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+ /* invalid SID */
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivWaitForEvent
+
+ PURPOSE : Blocks until the requested debug driver event occurs.
+
+ PARAMETERS : pvInBuffer - the DBG_EVENT to wait for
+ pvOutBuffer - unused
+
+ RETURNS : IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ ExtDBGDrivWaitForEvent(eEvent);
+
+ return(IMG_TRUE);
+}
+
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetFrame
+
+ PURPOSE : Gets the current capture frame number.
+
+ PARAMETERS : pvInBuffer - unused
+ pvOutBuffer - receives the current frame number
+
+ RETURNS : IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetFrame(void * pvInBuffer, void * pvOutBuffer, IMG_BOOL bCompat)
+{
+ IMG_UINT32 *pui32Current;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ *pui32Current = ExtDBGDrivGetFrame();
+
+ return(IMG_TRUE);
+}
+
+/*
+ ioctl interface jump table.
+ Accessed from the UM debug driver client
+*/
+IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL) =
+{
+ DBGDIOCDrivGetServiceTable, /* WDDM only: lets the KMD retrieve the service table address from DBGDRV; not used by umdbgdrvlnx */
+ DBGDIOCDrivGetStream,
+ DBGDIOCDrivRead,
+ DBGDIOCDrivSetMarker,
+ DBGDIOCDrivGetMarker,
+ DBGDIOCDrivWaitForEvent,
+ DBGDIOCDrivGetFrame,
+#if defined(__QNXNTO__)
+ DBGDIOCDrivCreateStream
+#endif
+};
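+
+/* Illustrative only: a sketch of how an OS-specific ioctl entry point might
+ * dispatch through the table above, assuming a zero-based command index
+ * uiCmd that the caller has already range-checked. The surrounding names
+ * are hypothetical and not part of this file.
+ *
+ * if (uiCmd < DEBUG_SERVICE_MAX_API)
+ * {
+ * bOK = g_DBGDrivProc[uiCmd](pvIn, pvOut, bCompat);
+ * }
+ */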
+
+/*****************************************************************************
+ End of file (IOCTL.C)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File ion_support.h
+@Title Generic Ion support header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file defines the API for generic Ion support.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR IonInit(void *pvPrivateData);
+
+void IonDeinit(void);
--- /dev/null
+/*************************************************************************/ /*!
+@File ion_sys.h
+@Title System level interface for Ion
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file defines the API between services and the system layer
+ required for Ion integration.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _ION_SYS_H_
+#define _ION_SYS_H_
+
+#include "pvrsrv_error.h"
+#include PVR_ANDROID_ION_HEADER
+
+
+PVRSRV_ERROR IonInit(void *phPrivateData);
+
+struct ion_device *IonDevAcquire(void);
+
+void IonDevRelease(struct ion_device *psIonDev);
+
+void IonDeinit(void);
+
+#endif /* _ION_SYS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File services/server/env/linux/kernel_compatibility.h
+@Title Kernel versions compatibility macros
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Per-version macros to allow code to seamlessly use older kernel
+ versions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_COMPATIBILITY_H__
+#define __KERNEL_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+/*
+ * Stop supporting an old kernel? Remove the top block.
+ * New incompatible kernel? Append a new block at the bottom.
+ *
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
+ * possible version :)
+ */
+
+
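+/* Illustrative only: new compatibility blocks are expected to follow the
+ * shape below. The version numbers and macro names here are placeholders,
+ * not real kernel changes:
+ *
+ * #if (LINUX_VERSION_CODE < KERNEL_VERSION(X, Y, 0))
+ * // Linux X.Y renamed old_api() to new_api()
+ * #define new_api(args) old_api(args)
+ * #endif
+ */
+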
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+
+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
+#define VM_DONTDUMP VM_RESERVED
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
+
+/*
+ * Note: this fix had to be written backwards because get_unused_fd_flags
+ * was already defined but not exported on kernels < 3.7
+ *
+ * When removing support for kernels < 3.7, this block should be removed
+ * and all `get_unused_fd()` should be manually replaced with
+ * `get_unused_fd_flags(0)`
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+
+/* Linux 3.19 removed get_unused_fd() */
+/* get_unused_fd_flags was introduced in 3.7 */
+#define get_unused_fd() get_unused_fd_flags(0)
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+
+/* Linux 3.12 introduced a new shrinker API */
+#define SHRINK_STOP (~0UL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) && defined(CONFIG_ARM)
+
+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
+#define ioremap_cache(cookie,size) ioremap_cached(cookie,size)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) && defined(CONFIG_ARM) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
+ * `struct page **pages` */
+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+
+/*
+ * Linux 4.7 removed this function but its replacement was available since 3.19.
+ */
+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+
+/* Linux 4.4 renamed __GFP_WAIT to __GFP_RECLAIM */
+#define __GFP_RECLAIM __GFP_WAIT
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+
+/* Linux 4.5 added a new printf-style parameter for debug messages */
+
+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
+ drm_encoder_init(dev, encoder, funcs, encoder_type)
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ...) \
+ drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type)
+
+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
+ drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+
+/*
+ * Linux 4.6 removed the start and end arguments as it now always maps
+ * the entire DMA-BUF.
+ * Additionally, dma_buf_end_cpu_access() now returns an int error.
+ */
+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION)
+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+
+/* Linux 4.7 removed the first argument as it was never used */
+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
+
+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 changed the second argument to a drm_file pointer */
+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
+
+#if !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+#define dev_pm_opp_of_add_table of_init_opp_table
+#define dev_pm_opp_of_remove_table of_free_opp_table
+#endif
+
+
+#endif /* __KERNEL_COMPATIBILITY_H__ */
+/*****************************************************************************
+ End of file (kernel_compatibility.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title C99-compatible types and definitions for Linux kernel code
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc. were added in kernel version 3.14. The fallback definitions
+ * below are for earlier kernels and can be removed once older kernels no
+ * longer need to be supported.
+ */
+#ifdef S8_MIN
+ #define INT8_MIN S8_MIN
+#else
+ #define INT8_MIN (-128)
+#endif
+
+#ifdef S8_MAX
+ #define INT8_MAX S8_MAX
+#else
+ #define INT8_MAX 127
+#endif
+
+#ifdef U8_MAX
+ #define UINT8_MAX U8_MAX
+#else
+ #define UINT8_MAX 0xFF
+#endif
+
+#ifdef S16_MIN
+ #define INT16_MIN S16_MIN
+#else
+ #define INT16_MIN (-32768)
+#endif
+
+#ifdef S16_MAX
+ #define INT16_MAX S16_MAX
+#else
+ #define INT16_MAX 32767
+#endif
+
+#ifdef U16_MAX
+ #define UINT16_MAX U16_MAX
+#else
+ #define UINT16_MAX 0xFFFF
+#endif
+
+#ifdef S32_MIN
+ #define INT32_MIN S32_MIN
+#else
+ #define INT32_MIN (-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+ #define INT32_MAX S32_MAX
+#else
+ #define INT32_MAX 2147483647
+#endif
+
+#ifdef U32_MAX
+ #define UINT32_MAX U32_MAX
+#else
+ #define UINT32_MAX 0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+ #define INT64_MIN S64_MIN
+#else
+ #define INT64_MIN (-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+ #define INT64_MAX S64_MAX
+#else
+ #define INT64_MAX 9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+ #define UINT64_MAX U64_MAX
+#else
+ #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C S8_C
+#define UINT8_C U8_C
+#define INT16_C S16_C
+#define UINT16_C U16_C
+#define INT32_C S32_C
+#define UINT32_C U32_C
+#define INT64_C S64_C
+#define UINT64_C U64_C
+
+/* Format conversion of integer types <inttypes.h> */
+/* Only define PRIX64 for the moment, as this is the only format macro that
+ * img_types.h needs.
+ */
+#define PRIX64 "llX"
--- /dev/null
+/*************************************************************************/ /*!
+@File km_apphint.c
+@Title Apphint routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <stdbool.h>
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "debugmisc_server.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+#include "img_defs.h"
+
+/* defines for default values */
+#include "rgx_fwif.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any strings read/written,
+ * but no larger than 4096, which is the buffer size for the
+ * kernel_param_ops .get function, and less than 1024 to keep
+ * the stack frame size within bounds.
+ */
+#define APPHINT_BUFFER_SIZE 512
+
+#define APPHINT_DEVICES_MAX 16
+
+/*
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+struct apphint_lookup {
+ char *name;
+ int value;
+};
+
+static const struct apphint_lookup fwt_logtype_tbl[] = {
+ { "trace", 2},
+ { "tbi", 1},
+ { "none", 0}
+};
+
+static const struct apphint_lookup fwt_loggroup_tbl[] = {
+ RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+};
+
+static const struct apphint_lookup htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+ HTB_LOG_SFGROUPLIST
+#undef X
+};
+
+static const struct apphint_lookup htb_opmode_tbl[] = {
+ { "droplatest", HTB_OPMODE_DROPLATEST},
+ { "dropoldest", HTB_OPMODE_DROPOLDEST},
+ { "block", HTB_OPMODE_BLOCK}
+};
+
+__maybe_unused
+static const struct apphint_lookup htb_logmode_tbl[] = {
+ { "all", HTB_LOGMODE_ALLPID},
+ { "restricted", HTB_LOGMODE_RESTRICTEDPID}
+};
+
+static const struct apphint_lookup timecorr_clk_tbl[] = {
+ { "mono", 0 },
+ { "mono_raw", 1 },
+ { "sched", 2 }
+};
+
+/*
+*******************************************************************************
+ Data types
+******************************************************************************/
+union apphint_value {
+ IMG_UINT64 UINT64;
+ IMG_UINT32 UINT32;
+ IMG_BOOL BOOL;
+ IMG_CHAR *STRING;
+};
+
+struct apphint_action {
+ union {
+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value);
+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value);
+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value);
+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value);
+ } query;
+ union {
+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value);
+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value);
+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value);
+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value);
+ } set;
+ const PVRSRV_DEVICE_NODE *device;
+ const void *private_data;
+ union apphint_value stored;
+ bool free;
+};
+
+struct apphint_param {
+ IMG_UINT32 id;
+ APPHINT_DATA_TYPE data_type;
+ const void *data_type_helper;
+ IMG_UINT32 helper_size;
+};
+
+struct apphint_init_data {
+ IMG_UINT32 id; /* index into AppHint Table */
+ APPHINT_CLASS class;
+ IMG_CHAR *name;
+ union apphint_value default_value;
+};
+
+struct apphint_class_state {
+ APPHINT_CLASS class;
+ IMG_BOOL enabled;
+};
+
+struct apphint_work {
+ struct work_struct work;
+ union apphint_value new_value;
+ struct apphint_action *action;
+};
+
+/*
+*******************************************************************************
+ Initialization / configuration table data
+******************************************************************************/
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+
+static const struct apphint_init_data init_data_buildvar[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_BUILDVAR
+#undef X
+};
+
+static const struct apphint_init_data init_data_modparam[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_MODPARAM
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_DEBUGFS
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs_device[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+};
+
+#undef UINT32Bitfield
+#undef UINT32List
+
+/* Don't use the kernel ARRAY_SIZE macro here because it checks
+ * __must_be_array() and we need to be able to use this safely on a NULL ptr.
+ * This will return an undefined size for a NULL ptr - so should only be
+ * used here.
+ */
+#define APPHINT_HELP_ARRAY_SIZE(a) (sizeof((a))/(sizeof((a[0]))))
+
+static const struct apphint_param param_lookup[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, APPHINT_HELP_ARRAY_SIZE(e) },
+ APPHINT_LIST_ALL
+#undef X
+};
+
+#undef APPHINT_HELP_ARRAY_SIZE
+
+static const struct apphint_class_state class_state[] = {
+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a},
+ APPHINT_CLASS_LIST
+#undef X
+};
+
+/*
+*******************************************************************************
+ Global state
+******************************************************************************/
+/* If the union apphint_value becomes such that it is not possible to read
+ * and write atomically, a mutex may be desirable to prevent a read returning
+ * a partially written state.
+ * This would require a statically initialized mutex outside of the
+ * struct apphint_state to prevent use of an uninitialized mutex when
+ * module_params are provided on the command line.
+ * static DEFINE_MUTEX(apphint_mutex);
+ */
+static struct apphint_state
+{
+ struct workqueue_struct *workqueue;
+ PVR_DEBUGFS_DIR_DATA *debugfs_device_rootdir[APPHINT_DEVICES_MAX];
+ PVR_DEBUGFS_ENTRY_DATA *debugfs_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGFS_DEVICE_ID_MAX];
+ PVR_DEBUGFS_DIR_DATA *debugfs_rootdir;
+ PVR_DEBUGFS_ENTRY_DATA *debugfs_entry[APPHINT_DEBUGFS_ID_MAX];
+ PVR_DEBUGFS_DIR_DATA *buildvar_rootdir;
+ PVR_DEBUGFS_ENTRY_DATA *buildvar_entry[APPHINT_BUILDVAR_ID_MAX];
+
+ int num_devices;
+ PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX];
+ int initialized;
+
+ struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGFS_DEVICE_ID_MAX)];
+
+} apphint = {
+/* statically initialise default values to ensure that any module_params
+ * provided on the command line are not overwritten by defaults.
+ */
+ .val = {
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+#define X(a, b, c, d, e) \
+ { {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+ APPHINT_LIST_ALL
+#undef X
+#undef UINT32Bitfield
+#undef UINT32List
+ },
+ .initialized = 0,
+ .num_devices = 0
+};
+
+#define APPHINT_DEBUGFS_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGFS_DEVICE_ID_MAX)
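+
+/* The device-specific debugfs AppHints occupy the last
+ * APPHINT_DEBUGFS_DEVICE_ID_MAX slots of the ID range. Device 0 uses those
+ * slots directly; each additional device gets a further block of
+ * APPHINT_DEBUGFS_DEVICE_ID_MAX entries appended after APPHINT_ID_MAX in
+ * apphint.val, which is why both the sizing of val above and the reverse
+ * mapping in get_apphint_id_from_action_addr() below use this offset.
+ */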
+
+static inline void
+get_apphint_id_from_action_addr(const struct apphint_action * const addr,
+ APPHINT_ID * const id)
+{
+ *id = (APPHINT_ID)(addr - apphint.val);
+ if (*id >= APPHINT_ID_MAX) {
+ *id -= APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+ *id %= APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ *id += APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+ }
+}
+
+static inline void
+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device,
+ int * const offset)
+{
+ int i;
+ for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i] == device)
+ break;
+ }
+ if (APPHINT_DEVICES_MAX == i) {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__));
+ i = 0;
+ }
+ *offset = i * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+}
+
+/**
+ * apphint_action_worker - perform an action after an AppHint update has been
+ * requested by a UM process, and update the record
+ * of the current active value
+ */
+static void apphint_action_worker(struct work_struct *work)
+{
+ struct apphint_work *work_pkt = container_of(work,
+ struct apphint_work,
+ work);
+ struct apphint_action *a = work_pkt->action;
+ union apphint_value value = work_pkt->new_value;
+ APPHINT_ID id;
+ PVRSRV_ERROR result = PVRSRV_OK;
+
+ get_apphint_id_from_action_addr(a, &id);
+
+ if (a->set.UINT64) {
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result = a->set.UINT64(a->device,
+ a->private_data,
+ value.UINT64);
+ break;
+
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ result = a->set.UINT32(a->device,
+ a->private_data,
+ value.UINT32);
+ break;
+
+ case APPHINT_DATA_TYPE_BOOL:
+ result = a->set.BOOL(a->device,
+ a->private_data,
+ value.BOOL);
+ break;
+
+ case APPHINT_DATA_TYPE_STRING:
+ result = a->set.STRING(a->device,
+ a->private_data,
+ value.STRING);
+ kfree(value.STRING);
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, param_lookup[id].data_type, id));
+ }
+
+ if (PVRSRV_OK != result) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed (%s)",
+ __func__, PVRSRVGetErrorStringKM(result)));
+ }
+ } else {
+ if (a->free) {
+ kfree(a->stored.STRING);
+ }
+ a->stored = value;
+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+ a->free = true;
+ }
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: AppHint value updated before handler is registered, ID(%d)",
+ __func__, id));
+ }
+ kfree((void *)work_pkt);
+}
+
+static void apphint_action(union apphint_value new_value,
+ struct apphint_action *action)
+{
+ struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL);
+
+ /* queue apphint update on a serialized workqueue to avoid races */
+ if (work_pkt) {
+ work_pkt->new_value = new_value;
+ work_pkt->action = action;
+ INIT_WORK(&work_pkt->work, apphint_action_worker);
+ if (0 == queue_work(apphint.workqueue, &work_pkt->work)) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to queue apphint change request",
+ __func__));
+ goto err_exit;
+ }
+ } else {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to alloc memory for apphint change request",
+ __func__));
+ goto err_exit;
+ }
+ return;
+err_exit:
+ kfree(new_value.STRING);
+}
+
+/**
+ * apphint_read - parse an AppHint value of the appropriate data type from
+ * the supplied buffer
+ *
+ * Returns -errno on failure, or count on success
+ */
+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue,
+ union apphint_value *value)
+{
+ APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type;
+ int result = 0;
+
+ switch (data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ if (kstrtou64(buffer, 0, &value->UINT64) < 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid UINT64 input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32:
+ if (kstrtou32(buffer, 0, &value->UINT32) < 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid UINT32 input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_BOOL:
+ switch (buffer[0]) {
+ case '0':
+ case 'n':
+ case 'N':
+ case 'f':
+ case 'F':
+ value->BOOL = IMG_FALSE;
+ break;
+ case '1':
+ case 'y':
+ case 'Y':
+ case 't':
+ case 'T':
+ value->BOOL = IMG_TRUE;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid BOOL input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32List:
+ {
+ int i;
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *)
+ param_lookup[ue].data_type_helper;
+ int size = param_lookup[ue].helper_size;
+ /* buffer may include '\n', remove it */
+ char *arg = strsep(&buffer, "\n");
+
+ if (!lookup) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (strcasecmp(lookup[i].name, arg) == 0) {
+ value->UINT32 = lookup[i].value;
+ break;
+ }
+ }
+ if (i == size) {
+ if (strlen(arg) == 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: No value set for AppHint",
+ __func__));
+ } else {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unrecognised AppHint value (%s)",
+ __func__, arg));
+ }
+ result = -EINVAL;
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ {
+ int i;
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *)
+ param_lookup[ue].data_type_helper;
+ int size = param_lookup[ue].helper_size;
+ /* buffer may include '\n', remove it */
+ char *string = strsep(&buffer, "\n");
+ char *token = strsep(&string, ",");
+
+ if (!lookup) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ value->UINT32 = 0;
+ /* empty string is valid to clear the bitfield */
+ while (token && *token) {
+ for (i = 0; i < size; i++) {
+ if (strcasecmp(lookup[i].name, token) == 0) {
+ value->UINT32 |= lookup[i].value;
+ break;
+ }
+ }
+ if (i == size) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unrecognised AppHint value (%s)",
+ __func__, token));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ token = strsep(&string, ",");
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_STRING:
+ {
+ /* buffer may include '\n', remove it */
+ char *string = strsep(&buffer, "\n");
+ size_t len = strlen(string);
+
+ if (!len) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ ++len;
+
+ value->STRING = kmalloc(len, GFP_KERNEL);
+ if (!value->STRING) {
+ result = -ENOMEM;
+ goto err_exit;
+ }
+
+ strlcpy(value->STRING, string, len);
+ break;
+ }
+ default:
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return (result < 0) ? result : count;
+}
+
+/**
+ * apphint_write - write the current AppHint data to a buffer
+ *
+ * Returns length written or -errno
+ */
+static int apphint_write(char *buffer, const size_t size,
+ const struct apphint_action *a)
+{
+ const struct apphint_param *hint;
+ int result = 0;
+ APPHINT_ID id;
+ union apphint_value value;
+
+ get_apphint_id_from_action_addr(a, &id);
+ hint = &param_lookup[id];
+
+ if (a->query.UINT64) {
+ switch (hint->data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result = a->query.UINT64(a->device,
+ a->private_data,
+ &value.UINT64);
+ break;
+
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ result = a->query.UINT32(a->device,
+ a->private_data,
+ &value.UINT32);
+ break;
+
+ case APPHINT_DATA_TYPE_BOOL:
+ result = a->query.BOOL(a->device,
+ a->private_data,
+ &value.BOOL);
+ break;
+
+ case APPHINT_DATA_TYPE_STRING:
+ result = a->query.STRING(a->device,
+ a->private_data,
+ &value.STRING);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, hint->data_type, id));
+ }
+
+ if (PVRSRV_OK != result) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)",
+ __func__, result, id));
+ }
+ } else {
+ value = a->stored;
+ }
+
+ switch (hint->data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result += snprintf(buffer + result, size - result,
+ "0x%016llx",
+ value.UINT64);
+ break;
+ case APPHINT_DATA_TYPE_UINT32:
+ result += snprintf(buffer + result, size - result,
+ "0x%08x",
+ value.UINT32);
+ break;
+ case APPHINT_DATA_TYPE_BOOL:
+ result += snprintf(buffer + result, size - result,
+ "%s",
+ value.BOOL ? "Y" : "N");
+ break;
+ case APPHINT_DATA_TYPE_STRING:
+ if (value.STRING) {
+ result += snprintf(buffer + result, size - result,
+ "%s",
+ *value.STRING ? value.STRING : "(none)");
+ } else {
+ result += snprintf(buffer + result, size - result,
+ "(none)");
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32List:
+ {
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *) hint->data_type_helper;
+ IMG_UINT32 i;
+
+ if (!lookup) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < hint->helper_size; i++) {
+ if (lookup[i].value == value.UINT32) {
+ result += snprintf(buffer + result,
+ size - result,
+ "%s",
+ lookup[i].name);
+ break;
+ }
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ {
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *) hint->data_type_helper;
+ IMG_UINT32 i;
+
+ if (!lookup) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < hint->helper_size; i++) {
+ if (lookup[i].value & value.UINT32) {
+ result += snprintf(buffer + result,
+ size - result,
+ "%s,",
+ lookup[i].name);
+ }
+ }
+ if (result) {
+ /* remove any trailing ',' */
+ --result;
+ *(buffer + result) = '\0';
+ } else {
+ result += snprintf(buffer + result,
+ size - result, "none");
+ }
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, hint->data_type, id));
+ result = -EINVAL;
+ }
+
+err_exit:
+ return result;
+}
+
+/*
+*******************************************************************************
+ Module parameters initialization - different from debugfs
+******************************************************************************/
+/**
+ * apphint_kparam_set - Handle an update of a module parameter
+ *
+ * Returns 0, or -errno. arg is in kp->arg.
+ */
+static int apphint_kparam_set(const char *val, const struct kernel_param *kp)
+{
+ char val_copy[APPHINT_BUFFER_SIZE];
+ APPHINT_ID id;
+ union apphint_value value;
+ int result;
+
+ /* copy to a writable buffer: apphint_read() modifies the string while parsing */
+ result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE);
+
+ get_apphint_id_from_action_addr(kp->arg, &id);
+ if (result < APPHINT_BUFFER_SIZE) {
+ result = apphint_read(val_copy, result, id, &value);
+ if (result >= 0) {
+ ((struct apphint_action *)kp->arg)->stored = value;
+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+ ((struct apphint_action *)kp->arg)->free = true;
+ }
+ }
+ } else {
+ PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__));
+ }
+ return (result > 0) ? 0 : result;
+}
+
+/**
+ * apphint_kparam_get - handle a read of a module parameter
+ *
+ * Returns length written or -errno. Buffer is 4k (i.e. be short!)
+ */
+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp)
+{
+ return apphint_write(buffer, PAGE_SIZE, kp->arg);
+}
+
+__maybe_unused
+static const struct kernel_param_ops apphint_kparam_fops = {
+ .set = apphint_kparam_set,
+ .get = apphint_kparam_get,
+};
+
+/*
+ * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM
+ * apphint_modparam_class_ ## resolves to apphint_modparam_enable() except for
+ * AppHint classes that have been disabled.
+ */
+
+#define apphint_modparam_enable(name, number, perm) \
+ module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
+
+#define X(a, b, c, d, e) \
+ apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, (S_IRUSR|S_IRGRP|S_IROTH))
+ APPHINT_LIST_MODPARAM
+#undef X
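+
+/* Illustrative only: for an enabled class, each X() entry above expands to a
+ * module parameter registration of the form below (the hint name is a
+ * placeholder, not a real entry in the AppHint table):
+ *
+ * module_param_cb(SomeHintName, &apphint_kparam_fops,
+ * &apphint.val[APPHINT_ID_SomeHintName],
+ * (S_IRUSR|S_IRGRP|S_IROTH));
+ */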
+
+/*
+*******************************************************************************
+ Debugfs get (seq file) operations - supporting functions
+******************************************************************************/
+static void *apphint_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos == 0) {
+ /* We want only one entry in the sequence, one call to show() */
+ return (void *) 1;
+ }
+
+ PVR_UNREFERENCED_PARAMETER(s);
+
+ return NULL;
+}
+
+static void apphint_seq_stop(struct seq_file *s, void *v)
+{
+ PVR_UNREFERENCED_PARAMETER(s);
+ PVR_UNREFERENCED_PARAMETER(v);
+}
+
+static void *apphint_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ PVR_UNREFERENCED_PARAMETER(s);
+ PVR_UNREFERENCED_PARAMETER(v);
+ PVR_UNREFERENCED_PARAMETER(pos);
+ return NULL;
+}
+
+static int apphint_seq_show(struct seq_file *s, void *v)
+{
+ IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE];
+ int result;
+
+ PVR_UNREFERENCED_PARAMETER(v);
+
+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, s->private);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__));
+ } else {
+ /* debugfs requires a trailing \n, module_params don't */
+ result += snprintf(km_buffer + result,
+ APPHINT_BUFFER_SIZE - result,
+ "\n");
+ seq_puts(s, km_buffer);
+ }
+
+ /* have to return 0 to see output */
+ return (result < 0) ? result : 0;
+}
+
+static const struct seq_operations apphint_seq_fops = {
+ .start = apphint_seq_start,
+ .stop = apphint_seq_stop,
+ .next = apphint_seq_next,
+ .show = apphint_seq_show,
+};
+
+/*
+*******************************************************************************
+ Debugfs supporting functions
+******************************************************************************/
+/**
+ * apphint_set - Handle a debugfs value update
+ */
+static ssize_t apphint_set(const char __user *buffer,
+ size_t count,
+ loff_t position,
+ void *data)
+{
+ APPHINT_ID id;
+ union apphint_value value;
+ struct apphint_action *action = data;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ int result = 0;
+
+ PVR_UNREFERENCED_PARAMETER(position);
+
+ if (count >= APPHINT_BUFFER_SIZE) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%zd)",
+ __func__, count));
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ if (pvr_copy_from_user(km_buffer, buffer, count)) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Copy of user data failed",
+ __func__));
+ result = -EFAULT;
+ goto err_exit;
+ }
+ km_buffer[count] = '\0';
+
+ get_apphint_id_from_action_addr(action, &id);
+ result = apphint_read(km_buffer, count, id, &value);
+ if (result >= 0)
+ apphint_action(value, action);
+
+err_exit:
+ return result;
+}
+
+/**
+ * apphint_debugfs_init - Create the specified debugfs entries
+ */
+static int apphint_debugfs_init(char *sub_dir,
+ int device_num,
+ unsigned init_data_size,
+ const struct apphint_init_data *init_data,
+ PVR_DEBUGFS_DIR_DATA *parentdir,
+ PVR_DEBUGFS_DIR_DATA **rootdir, PVR_DEBUGFS_ENTRY_DATA **entry)
+{
+ int result = 0;
+ unsigned i;
+ int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+
+ if (*rootdir) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "AppHint DebugFS already created, skipping"));
+ result = -EEXIST;
+ goto err_exit;
+ }
+
+ result = PVRDebugFSCreateEntryDir(sub_dir, parentdir,
+ rootdir);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Failed to create \"%s\" DebugFS directory.", sub_dir));
+ goto err_exit;
+ }
+
+ for (i = 0; i < init_data_size; i++) {
+ if (!class_state[init_data[i].class].enabled)
+ continue;
+
+ result = PVRDebugFSCreateEntry(init_data[i].name,
+ *rootdir,
+ &apphint_seq_fops,
+ apphint_set,
+ NULL,
+ NULL,
+ (void *) &apphint.val[init_data[i].id + device_value_offset],
+ &entry[i]);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Failed to create \"%s/%s\" DebugFS entry.",
+ sub_dir, init_data[i].name));
+ }
+ }
+
+err_exit:
+ return result;
+}
+
+/**
+ * apphint_debugfs_deinit- destroy the debugfs entries
+ */
+static void apphint_debugfs_deinit(unsigned num_entries,
+ PVR_DEBUGFS_DIR_DATA **rootdir, PVR_DEBUGFS_ENTRY_DATA **entry)
+{
+ unsigned i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (entry[i]) {
+ PVRDebugFSRemoveEntry(&entry[i]);
+ entry[i] = NULL;
+ }
+ }
+
+ if (*rootdir) {
+ PVRDebugFSRemoveEntryDir(rootdir);
+ *rootdir = NULL;
+ }
+}
+
+/*
+*******************************************************************************
+ AppHint status dump implementation
+******************************************************************************/
+#if defined(PDUMP)
+static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...)
+{
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags;
+ va_list ap;
+
+ va_start(ap, format);
+ (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap);
+ va_end(ap);
+
+ PDumpCommentKM(km_buffer, ui32Flags);
+}
+#endif
+
+static void apphint_dump_values(char *group_name,
+ int device_num,
+ const struct apphint_init_data *group_data,
+ int group_size,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ int i, result;
+ int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+
+ PVR_DUMPDEBUG_LOG(" %s", group_name);
+ for (i = 0; i < group_size; i++) {
+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE,
+ &apphint.val[group_data[i].id + device_value_offset]);
+
+ if (result <= 0) {
+ PVR_DUMPDEBUG_LOG(" %s: <Error>",
+ group_data[i].name);
+ } else {
+ PVR_DUMPDEBUG_LOG(" %s: %s",
+ group_data[i].name, km_buffer);
+ }
+ }
+}
+
+/**
+ * Callback for debug dump
+ */
+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ int i, result;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+
+ if (DEBUG_REQUEST_VERBOSITY_HIGH == ui32VerbLevel) {
+ PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------");
+
+ apphint_dump_values("Build Vars", 0,
+ init_data_buildvar, ARRAY_SIZE(init_data_buildvar),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ apphint_dump_values("Module Params", 0,
+ init_data_modparam, ARRAY_SIZE(init_data_modparam),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ apphint_dump_values("Debugfs Params", 0,
+ init_data_debugfs, ARRAY_SIZE(init_data_debugfs),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+ if (!apphint.devices[i]
+ || (device && device != apphint.devices[i]))
+ continue;
+
+ result = snprintf(km_buffer,
+ APPHINT_BUFFER_SIZE,
+ "Debugfs Params Device ID: %d",
+ i);
+ if (0 > result)
+ continue;
+
+ apphint_dump_values(km_buffer, i,
+ init_data_debugfs_device,
+ ARRAY_SIZE(init_data_debugfs_device),
+ pfnDumpDebugPrintf,
+ pvDumpDebugFile);
+ }
+ }
+}
+
+/*
+*******************************************************************************
+ Public interface
+******************************************************************************/
+int pvr_apphint_init(void)
+{
+ int result, i;
+
+ if (apphint.initialized) {
+ result = -EEXIST;
+ goto err_out;
+ }
+
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++)
+ apphint.devices[i] = NULL;
+
+ /* create workqueue with strict execution ordering to ensure no
+ * race conditions when setting/updating apphints from different
+ * contexts
+ */
+ apphint.workqueue = alloc_workqueue("apphint_workqueue", WQ_UNBOUND, 1);
+ if (!apphint.workqueue) {
+ result = -ENOMEM;
+ goto err_out;
+ }
+
+ result = apphint_debugfs_init("apphint", 0,
+ ARRAY_SIZE(init_data_debugfs), init_data_debugfs,
+ NULL,
+ &apphint.debugfs_rootdir, apphint.debugfs_entry);
+ if (0 != result)
+ goto err_out;
+
+ result = apphint_debugfs_init("buildvar", 0,
+ ARRAY_SIZE(init_data_buildvar), init_data_buildvar,
+ NULL,
+ &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+ apphint.initialized = 1;
+
+err_out:
+ return result;
+}
+
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
+{
+ int result, i;
+ char device_num[APPHINT_BUFFER_SIZE];
+ int device_value_offset;
+
+ if (!apphint.initialized) {
+ result = -EAGAIN;
+ goto err_out;
+ }
+
+ if (apphint.num_devices+1 >= APPHINT_DEVICES_MAX) {
+ result = -EMFILE;
+ goto err_out;
+ }
+
+ result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%d", apphint.num_devices);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "snprintf failed (%d)", result));
+ result = -EINVAL;
+ goto err_out;
+ }
+
+ /* Set the default values for the new device */
+ device_value_offset = apphint.num_devices * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ for (i = 0; i < APPHINT_DEBUGFS_DEVICE_ID_MAX; i++) {
+ apphint.val[init_data_debugfs_device[i].id + device_value_offset].stored
+ = init_data_debugfs_device[i].default_value;
+ }
+
+ result = apphint_debugfs_init(device_num, apphint.num_devices,
+ ARRAY_SIZE(init_data_debugfs_device),
+ init_data_debugfs_device,
+ apphint.debugfs_rootdir,
+ &apphint.debugfs_device_rootdir[apphint.num_devices],
+ apphint.debugfs_device_entry[apphint.num_devices]);
+ if (0 != result)
+ goto err_out;
+
+ apphint.devices[apphint.num_devices] = device;
+ apphint.num_devices++;
+
+ (void)PVRSRVRegisterDbgRequestNotify(
+ &device->hAppHintDbgReqNotify,
+ device,
+ apphint_dump_state,
+ DEBUG_REQUEST_APPHINT,
+ device);
+
+err_out:
+ return result;
+}
+
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
+{
+ int i;
+
+ if (!apphint.initialized)
+ return;
+
+ /* find the device */
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i] == device)
+ break;
+ }
+
+ if (APPHINT_DEVICES_MAX == i)
+ return;
+
+ if (device->hAppHintDbgReqNotify) {
+ (void)PVRSRVUnregisterDbgRequestNotify(
+ device->hAppHintDbgReqNotify);
+ device->hAppHintDbgReqNotify = NULL;
+ }
+
+ apphint_debugfs_deinit(APPHINT_DEBUGFS_DEVICE_ID_MAX,
+ &apphint.debugfs_device_rootdir[i],
+ apphint.debugfs_device_entry[i]);
+
+ apphint.devices[i] = NULL;
+ apphint.num_devices--;
+}
+
+void pvr_apphint_deinit(void)
+{
+ int i;
+
+ if (!apphint.initialized)
+ return;
+
+ /* remove any remaining device data */
+ for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i])
+ pvr_apphint_device_unregister(apphint.devices[i]);
+ }
+
+ /* free all alloc'd string apphints and set to NULL */
+ for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
+ if (apphint.val[i].free && apphint.val[i].stored.STRING) {
+ kfree(apphint.val[i].stored.STRING);
+ apphint.val[i].stored.STRING = NULL;
+ apphint.val[i].free = false;
+ }
+ }
+
+ apphint_debugfs_deinit(APPHINT_DEBUGFS_ID_MAX,
+ &apphint.debugfs_rootdir, apphint.debugfs_entry);
+ apphint_debugfs_deinit(APPHINT_BUILDVAR_ID_MAX,
+ &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+ destroy_workqueue(apphint.workqueue);
+
+ apphint.initialized = 0;
+}
+
+void pvr_apphint_dump_state(void)
+{
+#if defined(PDUMP)
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+ apphint_pdump_values, (void *)&ui32Flags);
+#endif
+ apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+ NULL, NULL);
+}
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ *pVal = apphint.val[ue].stored.UINT64;
+ error = 0;
+ }
+ return error;
+}
+
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ *pVal = apphint.val[ue].stored.UINT32;
+ error = 0;
+ }
+ return error;
+}
+
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ error = 0;
+ *pVal = apphint.val[ue].stored.BOOL;
+ }
+ return error;
+}
+
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) {
+ if (strlcpy(pBuffer, apphint.val[ue].stored.STRING, size) < size) {
+ error = 0;
+ }
+ }
+ return error;
+}
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.UINT64 = query,
+ .set.UINT64 = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.UINT32 = query,
+ .set.UINT32 = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_BOOL:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.BOOL = query,
+ .set.BOOL = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_STRING:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.STRING = query,
+ .set.STRING = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+#endif /* #if defined(SUPPORT_KERNEL_SRVINIT) */
+/* EOF */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File km_apphint.h
+@Title Apphint internal header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Linux kernel AppHint control
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KM_APPHINT_H__
+#define __KM_APPHINT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "km_apphint_defs.h"
+#include "device.h"
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+
+int pvr_apphint_init(void);
+void pvr_apphint_deinit(void);
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_dump_state(void);
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal);
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal);
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal);
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+
+#else
+
+static INLINE void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ PVR_UNREFERENCED_PARAMETER(id);
+ PVR_UNREFERENCED_PARAMETER(query);
+ PVR_UNREFERENCED_PARAMETER(set);
+ PVR_UNREFERENCED_PARAMETER(device);
+ PVR_UNREFERENCED_PARAMETER(private_data);
+}
+
+static INLINE void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ PVR_UNREFERENCED_PARAMETER(id);
+ PVR_UNREFERENCED_PARAMETER(query);
+ PVR_UNREFERENCED_PARAMETER(set);
+ PVR_UNREFERENCED_PARAMETER(device);
+ PVR_UNREFERENCED_PARAMETER(private_data);
+}
+
+static INLINE void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ PVR_UNREFERENCED_PARAMETER(id);
+ PVR_UNREFERENCED_PARAMETER(query);
+ PVR_UNREFERENCED_PARAMETER(set);
+ PVR_UNREFERENCED_PARAMETER(device);
+ PVR_UNREFERENCED_PARAMETER(private_data);
+}
+
+static INLINE void pvr_apphint_register_handlers_string(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ PVR_UNREFERENCED_PARAMETER(id);
+ PVR_UNREFERENCED_PARAMETER(query);
+ PVR_UNREFERENCED_PARAMETER(set);
+ PVR_UNREFERENCED_PARAMETER(device);
+ PVR_UNREFERENCED_PARAMETER(private_data);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* __KM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (km_apphint.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services AppHint definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef __KM_APPHINT_DEFS_H__
+#define __KM_APPHINT_DEFS_H__
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices
+ */
+#define APPHINT_LIST_ALL \
+ APPHINT_LIST_BUILDVAR \
+ APPHINT_LIST_MODPARAM \
+ APPHINT_LIST_DEBUGFS \
+ APPHINT_LIST_DEPRECATED \
+ APPHINT_LIST_DEBUGFS_DEVICE
+
+/*
+*******************************************************************************
+ Build variables
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR \
+/* name, type, class, default, helper, */ \
+X(HWRDebugDumpLimit, UINT32, DEBUG, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NULL ) \
+X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NULL ) \
+X(HTBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NULL ) \
+X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NULL ) \
+X(CleanupThreadWeight, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADWEIGHT, NULL ) \
+X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NULL ) \
+X(WatchdogThreadWeight, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT, NULL ) \
+
+/*
+*******************************************************************************
+ Module parameters
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name, type, class, default, helper, */ \
+X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NULL ) \
+X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NULL ) \
+\
+X(DisableClockGating, BOOL, FWDBGCTRL, PVRSRV_APPHINT_DISABLECLOCKGATING, NULL ) \
+X(DisableDMOverlap, BOOL, FWDBGCTRL, PVRSRV_APPHINT_DISABLEDMOVERLAP, NULL ) \
+\
+X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NULL ) \
+X(EnableFWContextSwitch, UINT32, FWDBGCTRL, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NULL ) \
+X(EnableRDPowerIsland, UINT32, FWDBGCTRL, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NULL ) \
+\
+X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE, NULL ) \
+\
+X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NULL ) \
+X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NULL ) \
+X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NULL ) \
+X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NULL ) \
+X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NULL ) \
+\
+X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NULL ) \
+X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NULL ) \
+X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NULL ) \
+X(UseMETAT1, UINT32, VALIDATION, PVRSRV_APPHINT_USEMETAT1, NULL ) \
+X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NULL ) \
+
+/*
+*******************************************************************************
+ Debugfs parameters - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS \
+/* name, type, class, default, helper, */ \
+X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \
+X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \
+X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NULL ) \
+X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NULL ) \
+X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl )
+
+/*
+*******************************************************************************
+ Debugfs parameters - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS_DEVICE \
+/* name, type, class, default, helper, */ \
+/* Device Firmware config */\
+X(AssertOnHWRTrigger, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTONHWRTRIGGER, NULL ) \
+X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NULL ) \
+X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NULL ) \
+X(EnableHWR, BOOL, ALWAYS, APPHNT_BLDVAR_ENABLEHWR, NULL ) \
+X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \
+X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \
+/* Device host config */ \
+X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NULL ) \
+X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NULL ) \
+X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NULL ) \
+X(DustRequestInject, BOOL, VALIDATION, PVRSRV_APPHINT_DUSTREQUESTINJECT, NULL ) \
+X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NULL ) \
+X(EnableFWPoisonOnFree, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NULL ) \
+X(FWPoisonOnFreeValue, UINT32, ALWAYS, PVRSRV_APPHINT_FWPOISONONFREEVALUE, NULL ) \
+
+/*
+*******************************************************************************
+ Deprecated parameters kept for backwards compatibility
+******************************************************************************/
+#define APPHINT_LIST_DEPRECATED \
+/* name, type, class, default, helper, */ \
+X(EnableFTraceGPU, BOOL, ALWAYS, 0, NULL ) \
+X(EnableRTUBypass, BOOL, ALWAYS, 0, NULL ) \
+\
+X(EnableHWPerf, BOOL, ALWAYS, 0, NULL ) \
+X(EnableHWPerfHost, BOOL, ALWAYS, 0, NULL ) \
+\
+X(DisablePDP, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NULL ) \
+X(HWPerfFilter0, UINT32, ALWAYS, 0, NULL ) \
+X(HWPerfFilter1, UINT32, ALWAYS, 0, NULL ) \
+
+/*
+*******************************************************************************
+ Types used in the APPHINT_LIST_<GROUP> lists must be defined here.
+ New types require specific handling code to be added.
+******************************************************************************/
+#define APPHINT_DATA_TYPE_LIST \
+X(BOOL) \
+X(UINT64) \
+X(UINT32) \
+X(UINT32Bitfield) \
+X(UINT32List) \
+X(STRING)
+
+#define APPHINT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER) \
+X(DEBUG) \
+X(FWDBGCTRL) \
+X(PDUMP) \
+X(VALIDATION) \
+X(GPUVIRT_VAL)
+
+/*
+*******************************************************************************
+ Visibility control for module parameters
+ These bind build variables to AppHint Visibility Groups.
+******************************************************************************/
+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE
+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE
+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c)
+#if defined(DEBUG)
+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE
+ #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE
+ #define apphint_modparam_class_DEBUG(a, b, c)
+#endif
+#if defined(SUPPORT_FWDBGCTRL)
+ #define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_TRUE
+ #define apphint_modparam_class_FWDBGCTRL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_FALSE
+ #define apphint_modparam_class_FWDBGCTRL(a, b, c)
+#endif
+#if defined(PDUMP)
+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE
+ #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE
+ #define apphint_modparam_class_PDUMP(a, b, c)
+#endif
+#if defined(SUPPORT_VALIDATION)
+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE
+ #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE
+ #define apphint_modparam_class_VALIDATION(a, b, c)
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE
+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE
+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c)
+#endif
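+
+/* Illustration: the per-class macros above compile module-parameter
+ * registration in or out of the build. For example, when SUPPORT_VALIDATION
+ * is defined,
+ *
+ *     apphint_modparam_class_VALIDATION(a, b, c)
+ *
+ * expands to apphint_modparam_enable(a, b, c); when it is not, the invocation
+ * expands to nothing, so VALIDATION-class hints add no module parameters.
+ */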
+
+/*
+*******************************************************************************
+ AppHint defaults based on other build parameters
+******************************************************************************/
+#if defined(HWR_DEFAULT_ENABLED)
+ #define APPHNT_BLDVAR_ENABLEHWR 1
+#else
+ #define APPHNT_BLDVAR_ENABLEHWR 0
+#endif
+#if defined(DEBUG)
+ #define APPHNT_BLDVAR_DEBUG 1
+ #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL
+#else
+ #define APPHNT_BLDVAR_DEBUG 0
+ #define APPHNT_BLDVAR_DBGDUMPLIMIT 1
+#endif
+#if defined(DEBUG) || defined(PDUMP)
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE
+#endif
+
+/*
+*******************************************************************************
+
+ Table-generated enums
+
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+ APPHINT_LIST_ALL
+#undef X
+ APPHINT_ID_MAX
+} APPHINT_ID;
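+
+/* Illustration: each APPHINT_LIST_<GROUP> above is an X-macro list. With
+ * X(a, b, c, d, e) temporarily defined to emit APPHINT_ID_ ## a, an entry
+ * such as
+ *
+ *     X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NULL)
+ *
+ * contributes the enumerator APPHINT_ID_ZeroFreelist, keeping this enum in
+ * sync with APPHINT_LIST_ALL without a hand-maintained table.
+ */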
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+ APPHINT_LIST_BUILDVAR
+#undef X
+ APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+ APPHINT_LIST_MODPARAM
+#undef X
+ APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a,
+ APPHINT_LIST_DEBUGFS
+#undef X
+ APPHINT_DEBUGFS_ID_MAX
+} APPHINT_DEBUGFS_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a,
+ APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+ APPHINT_DEBUGFS_DEVICE_ID_MAX
+} APPHINT_DEBUGFS_DEVICE_ID;
+
+/* data types and actions */
+typedef enum {
+ APPHINT_DATA_TYPE_INVALID = 0,
+#define X(a) APPHINT_DATA_TYPE_ ## a,
+ APPHINT_DATA_TYPE_LIST
+#undef X
+ APPHINT_DATA_TYPE_MAX
+} APPHINT_DATA_TYPE;
+
+typedef enum {
+#define X(a) APPHINT_CLASS_ ## a,
+ APPHINT_CLASS_LIST
+#undef X
+ APPHINT_CLASS_MAX
+} APPHINT_CLASS;
+
+#endif /* __KM_APPHINT_DEFS_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linux specific Services code internal interfaces
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Interfaces between various parts of the Linux-specific
+ Services code that do not have any other obvious
+ header file to go into.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__LINKAGE_H__)
+#define __LINKAGE_H__
+
+/*
+ * FIXME: These are declared here to avoid creating a new header; they should
+ * be removed soon anyway, as bridge gen should be providing this interface.
+ */
+PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features);
+PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features);
+PVRSRV_ERROR LinuxBridgeInit(void);
+PVRSRV_ERROR LinuxBridgeDeInit(void);
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+int PVRDebugCreateDebugFSEntries(void);
+void PVRDebugRemoveDebugFSEntries(void);
+
+#endif /* !defined(__LINKAGE_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Module defs for pvr core drivers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+#include "dbgdrvif_srv5.h"
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
+ IMG_UINT32 ui32ControlCode,
+ void *pInBuffer,
+ IMG_UINT32 ui32InBufferSize,
+ void *pOutBuffer,
+ IMG_UINT32 ui32OutBufferSize,
+ IMG_UINT32 *pui32BytesReturned);
+
+#endif /* _LINUXSRV_H__*/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linked list shared functions implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implementation of the list iterators for types shared among
+ more than one file in the services code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+ LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+ once are implemented locally).
+ ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linked list shared functions templates.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Definition of the linked list function templates.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+#include <stdarg.h>
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, it is required
+ to use the function template macro that creates the actual code.
+
+ There are 5 main types of functions:
+ - INSERT : given a pointer to the head pointer of the list and a pointer
+ to the node, inserts it as the new head.
+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
+ to the node, inserts the node at the tail of the list.
+ - REMOVE : given a pointer to a node, removes it from its list.
+ - FOR EACH : apply a function over all the elements of a list.
+ - ANY : apply a function over the elements of a list, until one of them
+ returns a non-null value, which is then returned.
+
+ The last two functions can have a variable-argument form, which allows
+ additional parameters to be passed to the callback function. In order to do
+ this, the callback function must take two arguments: the first is the current
+ node and the second is a list of variable arguments (va_list).
+
+ The ANY functions also have another form, which specifies the return type of
+ the callback function and the default value returned by the callback function.
+
+*/
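+
+/*
+ Usage sketch (MY_NODE and PrintNode below are illustrative names only): any
+ structure carrying psNext/ppsThis fields can be given list helpers by
+ instantiating the templates in this header, e.g.
+
+     typedef struct MY_NODE_TAG {
+         struct MY_NODE_TAG  *psNext;
+         struct MY_NODE_TAG **ppsThis;
+         int                  iPayload;
+     } MY_NODE;
+
+     DECLARE_LIST_INSERT(MY_NODE);
+     IMPLEMENT_LIST_INSERT(MY_NODE)
+     DECLARE_LIST_FOR_EACH(MY_NODE);
+     IMPLEMENT_LIST_FOR_EACH(MY_NODE)
+
+     MY_NODE *psHead = NULL;
+     MY_NODE sNode = { NULL, NULL, 0 };
+     List_MY_NODE_Insert(&psHead, &sNode);
+     List_MY_NODE_ForEach(psHead, PrintNode);
+
+ The driver itself instantiates these templates for PVRSRV_DEVICE_NODE at the
+ end of this header.
+*/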
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_ForEach
+@Description Apply a callback function to all the elements of a list.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+ while(psHead)\
+ {\
+ pfnCallBack(psHead);\
+ psHead = psHead->psNext;\
+ }\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_ForEachSafe
+@Description Apply a callback function to all the elements of a list. Do it
+ in a safe way that handles the fact that a node might remove itself
+ from the list during the iteration.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+ TYPE *psNext;\
+\
+ while(psHead)\
+ {\
+ psNext = psHead->psNext; \
+ pfnCallBack(psHead);\
+ psHead = psNext;\
+ }\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+ va_list ap;\
+ while(psHead)\
+ {\
+ va_start(ap, pfnCallBack);\
+ pfnCallBack(psHead, ap);\
+ psHead = psHead->psNext;\
+ va_end(ap);\
+ }\
+}
+
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Any
+@Description Applies a callback function to the elements of a list until the
+ function returns a non-null value, which is then returned.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+@Return The first non null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
+{ \
+ void *pResult;\
+ TYPE *psNextNode;\
+ pResult = NULL;\
+ psNextNode = psHead;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ pResult = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+
+/* With variable arguments, which are passed as a va_list to the callback function */
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ void* pResult = NULL;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ pResult = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+/* These forms provide extra type safety, so there is no need to cast the results */
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+ RTYPE result;\
+ TYPE *psNextNode;\
+ result = CONTINUE;\
+ psNextNode = psHead;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ result = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ RTYPE result = CONTINUE;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ result = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Remove
+@Description Removes a given node from the list.
+@Input psNode The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)\
+{\
+ (*psNode->ppsThis)=psNode->psNext;\
+ if(psNode->psNext)\
+ {\
+ psNode->psNext->ppsThis = psNode->ppsThis;\
+ }\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Insert
+@Description Inserts a given node at the beginning of the list.
+@Input psHead The pointer to the pointer to the head node.
+@Input psNode The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+ psNewNode->ppsThis = ppsHead;\
+ psNewNode->psNext = *ppsHead;\
+ *ppsHead = psNewNode;\
+ if(psNewNode->psNext)\
+ {\
+ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+ }\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_InsertTail
+@Description Inserts a given node at the end of the list.
+@Input psHead The pointer to the pointer to the head node.
+@Input psNode The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+ TYPE *psTempNode = *ppsHead;\
+ if (psTempNode != NULL)\
+ {\
+ while (psTempNode->psNext)\
+ psTempNode = psTempNode->psNext;\
+ ppsHead = &psTempNode->psNext;\
+ }\
+ psNewNode->ppsThis = ppsHead;\
+ psNewNode->psNext = NULL;\
+ *ppsHead = psNewNode;\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Reverse
+@Description Reverse a list in place
+@Input ppsHead The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+ TYPE *psTmpNode1; \
+ TYPE *psTmpNode2; \
+ TYPE *psCurNode; \
+ psTmpNode1 = NULL; \
+ psCurNode = *ppsHead; \
+ while(psCurNode) { \
+ psTmpNode2 = psCurNode->psNext; \
+ psCurNode->psNext = psTmpNode1; \
+ psTmpNode1 = psCurNode; \
+ psCurNode = psTmpNode2; \
+ if(psCurNode) \
+ { \
+ psTmpNode1->ppsThis = &(psCurNode->psNext); \
+ } \
+ else \
+ { \
+ psTmpNode1->ppsThis = ppsHead; \
+ } \
+ } \
+ *ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
--- /dev/null
+/*************************************************************************/ /*!
+@File lock.h
+@Title Locking interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal locking interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <linux/atomic.h>
+
+#define OSLockCreateNoStats(phLock, eLockType) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSLockCreate(phLock, eLockType) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(phLock) = OSAllocMem(sizeof(struct mutex)); \
+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter) atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+ imply a memory barrier around the operation */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test)
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
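+
+/* Usage sketch (hLock is an illustrative local, not a driver symbol): the
+ * same calling pattern works on both sides of this conditional, e.g.
+ *
+ *     POS_LOCK hLock;
+ *     if (OSLockCreate(&hLock, LOCK_TYPE_PASSIVE) == PVRSRV_OK)
+ *     {
+ *         OSLockAcquire(hLock);
+ *         ... critical section ...
+ *         OSLockRelease(hLock);
+ *         OSLockDestroy(hLock);
+ *     }
+ *
+ * In this kernel-mode build the calls above reduce to the mutex_init/
+ * mutex_lock/mutex_unlock/mutex_destroy macros defined here.
+ */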
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function OSLockCreate
+@Description Creates an operating system lock object.
+@Output phLock The created lock.
+@Input eLockType The type of lock required. This may be:
+ LOCK_TYPE_PASSIVE - the lock will not be used
+ in interrupt context or
+ LOCK_TYPE_DISPATCH - the lock may be used
+ in interrupt context.
+@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver
+ cannot allocate CPU memory needed for the lock.
+ PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to
+ allocate the lock.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock, LOCK_TYPE eLockType);
+#if defined(INTEGRITY_OS)
+#define OSLockCreateNoStats OSLockCreate
+#endif
+
+/**************************************************************************/ /*!
+@Function OSLockDestroy
+@Description Destroys an operating system lock object.
+@Input hLock The lock to be destroyed.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock);
+
+#if defined(INTEGRITY_OS)
+#define OSLockDestroyNoStats OSLockDestroy
+#endif
+/**************************************************************************/ /*!
+@Function OSLockAcquire
+@Description Acquires an operating system lock.
+ NB. This function must not return until the lock is acquired
+ (meaning the implementation should not timeout or return with
+ an error, as the caller will assume they have the lock).
+@Input hLock The lock to be acquired.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or other OSs */
+/**************************************************************************/ /*!
+@Function OSLockAcquireNested
+@Description For operating systems other than Linux, this equates to an
+ OSLockAcquire() call. On Linux, this function wraps a call
+ to mutex_lock_nested(). This recognises the scenario where
+ there may be multiple subclasses within a particular class
+ of lock. In such cases, the order in which the locks belonging
+ these various subclasses are acquired is important and must be
+ validated.
+@Input hLock The lock to be acquired.
+@Input subclass The subclass of the lock.
+@Return None.
+ */ /**************************************************************************/
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
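+
+/* Illustration (hParentLock/hChildLock are hypothetical): where a parent lock
+ * must always be taken before a child lock of the same class, the fixed order
+ * can be expressed with distinct subclasses,
+ *
+ *     OSLockAcquireNested(hParentLock, 0);
+ *     OSLockAcquireNested(hChildLock, 1);
+ *
+ * so that lockdep can validate the ordering on Linux; elsewhere both calls
+ * are plain OSLockAcquire() as defined above.
+ */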
+
+/**************************************************************************/ /*!
+@Function OSLockRelease
+@Description Releases an operating system lock.
+@Input hLock The lock to be released.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockRelease(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function OSLockIsLocked
+@Description Tests whether or not an operating system lock is currently
+ locked.
+@Input hLock The lock to be tested.
+@Return IMG_TRUE if locked, IMG_FALSE if not locked.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
+
+#if defined(LINUX)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) (*(volatile int *)&(pCounter)->counter)
+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT) i)
+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) \
+ __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
+
+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+ int c; int old; \
+ c = OSAtomicRead(pCounter); \
+ while (1) { \
+ if (c == (test)) break; \
+ old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+ if (old == c) break; \
+ c = old; \
+ } c; })
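+
+/* Illustration (sRefCount is a hypothetical counter): the loop above returns
+ * the value observed before any add, i.e. the test value when no add occurs,
+ * so a "do not resurrect" reference count can be taken as
+ *
+ *     if (OSAtomicAddUnless(&sRefCount, 1, 0) == 0)
+ *         ... count was already zero, no reference taken ...
+ */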
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+/*************************************************************************/ /*!
+@Function OSAtomicRead
+@Description Read the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to read
+@Return The value of the atomic variable
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicRead(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicWrite
+@Description Write the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be written to
+@Input v The value to write
+@Return None
+*/ /**************************************************************************/
+IMG_INTERNAL
+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT v);
+
+/* The following atomic operations, in addition to being SMP-safe,
+ must also place a memory barrier around each operation */
+/*************************************************************************/ /*!
+@Function OSAtomicIncrement
+@Description Increment the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be incremented
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicIncrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicDecrement
+@Description Decrement the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be decremented
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicDecrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicAdd
+@Description Add a specified value to a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to add the value to
+@Input v The value to be added
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT v);
+
+/*************************************************************************/ /*!
+@Function OSAtomicAddUnless
+@Description Add a specified value to a variable atomically unless it
+ already equals a particular value.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to add the value to
+@Input v The value to be added to 'pCounter'
+@Input t The test value. If 'pCounter' equals this,
+ its value will not be adjusted
+@Return The value of *pCounter prior to the addition (if this equals 't', no addition took place).
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
+
+/*************************************************************************/ /*!
+@Function OSAtomicSubtract
+@Description Subtract a specified value from a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to subtract the value from
+@Input v The value to be subtracted
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT v);
+
+/*************************************************************************/ /*!
+@Function OSAtomicSubtractUnless
+@Description Subtract a specified value from a variable atomically unless
+ it already equals a particular value.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to subtract the value from
+@Input v The value to be subtracted from 'pCounter'
+@Input t The test value. If 'pCounter' equals this,
+ its value will not be adjusted
+@Return The value of *pCounter prior to the subtraction (if this equals 't', no subtraction took place).
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
+
+/*************************************************************************/ /*!
+@Function OSAtomicCompareExchange
+@Description Set a variable to a given value only if it is currently
+ equal to a specified value. The whole operation must be atomic.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be checked and
+ possibly updated
+@Input oldv The value the atomic variable must have in
+ order to be modified
+@Input newv The value to write to the atomic variable if
+ it equals 'oldv'
+@Return The value of *pCounter immediately before the operation; the exchange took place if this equals 'oldv'.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT oldv, IMG_INT newv);
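+
+/* Illustrative usage sketch (assumed helper name and bound, not part of this
+ * header's API): a bounded increment built from the compare/exchange
+ * primitive, retrying until an unchanged value is observed.
+ *
+ *   static IMG_INT MyBoundedIncrement(ATOMIC_T *psCounter, IMG_INT iMax)
+ *   {
+ *       IMG_INT iOld = OSAtomicRead(psCounter);
+ *       IMG_INT iSeen;
+ *
+ *       while (iOld < iMax)
+ *       {
+ *           iSeen = OSAtomicCompareExchange(psCounter, iOld, iOld + 1);
+ *           if (iSeen == iOld)
+ *           {
+ *               break;
+ *           }
+ *           iOld = iSeen;
+ *       }
+ *       return iOld;
+ *   }
+ */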
+
+#endif /* defined(LINUX) */
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif /* _LOCK_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File lock_types.h
+@Title Locking types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Locking specific enums, defines and structures
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_TYPES_H_
+#define _LOCK_TYPES_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct _OS_LOCK_ *POS_LOCK;
+#if defined(LINUX)
+ typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+ typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(_WIN32)
+ /*
+ * Dummy definition. WDDM doesn't use Services, but some headers
+ * still have to be shared. This is one such case.
+ */
+ typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+ /* Fixed size data type to hold the largest value */
+ typedef struct _OS_ATOMIC {IMG_UINT64 counter;} ATOMIC_T;
+#else
+ #error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+typedef enum
+{
+ LOCK_TYPE_NONE = 0x00,
+
+ LOCK_TYPE_MASK = 0x0F,
+ LOCK_TYPE_PASSIVE = 0x01, /* Passive level lock e.g. mutex, system may promote to dispatch */
+ LOCK_TYPE_DISPATCH = 0x02, /* Dispatch level lock e.g. spin lock, may be used in ISR/MISR */
+
+ LOCK_TYPE_INSIST_FLAG = 0x80, /* When set caller can guarantee lock not used in ISR/MISR */
+ LOCK_TYPE_PASSIVE_ONLY = LOCK_TYPE_INSIST_FLAG | LOCK_TYPE_PASSIVE
+
+} LOCK_TYPE;
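+
+/* Illustrative usage sketch (assumed helper name, not part of this header's
+ * API): a LOCK_TYPE value combines a base type in LOCK_TYPE_MASK with
+ * optional flag bits, so callers decode it by masking.
+ *
+ *   static IMG_BOOL MyLockMayBeUsedInISR(LOCK_TYPE eType)
+ *   {
+ *       if (eType & LOCK_TYPE_INSIST_FLAG)
+ *       {
+ *           return IMG_FALSE;
+ *       }
+ *       return (IMG_BOOL)((eType & LOCK_TYPE_MASK) == LOCK_TYPE_DISPATCH);
+ *   }
+ */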
+#endif /* _LOCK_TYPES_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Integer log2 and related functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOG2_H
+#define LOG2_H
+
+#include "img_defs.h"
+
+/**************************************************************************/ /*!
+@Description Determine if a number is a power of two.
+@Input n
+@Return True if n is a power of 2, false otherwise. True if n == 0.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2(uint32_t n)
+{
+ /* C++ needs this cast. */
+ return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/**************************************************************************/ /*!
+@Description Determine if a number is a power of two.
+@Input n
+@Return True if n is a power of 2, false otherwise. True if n == 0.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2_64(uint64_t n)
+{
+ /* C++ needs this cast. */
+ return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint32_t RoundUpToNextPowerOfTwo(uint32_t n)
+{
+ n--;
+ n |= n >> 1; /* handle 2 bit numbers */
+ n |= n >> 2; /* handle 4 bit numbers */
+ n |= n >> 4; /* handle 8 bit numbers */
+ n |= n >> 8; /* handle 16 bit numbers */
+ n |= n >> 16; /* handle 32 bit numbers */
+ n++;
+
+ return n;
+}
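+
+/* Worked example of the bit-smearing above (values are illustrative):
+ * RoundUpToNextPowerOfTwo(19): 19-1 = 18 = 0b10010; the successive shifted
+ * ORs set every bit below the top set bit, giving 0b11111 = 31; adding 1
+ * yields 32. For n == 0 the decrement wraps to 0xFFFFFFFF, the ORs leave it
+ * unchanged and the increment wraps back to 0, which is why zero is returned
+ * unmodified.
+ */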
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint64_t RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+ n--;
+ n |= n >> 1; /* handle 2 bit numbers */
+ n |= n >> 2; /* handle 4 bit numbers */
+ n |= n >> 4; /* handle 8 bit numbers */
+ n |= n >> 8; /* handle 16 bit numbers */
+ n |= n >> 16; /* handle 32 bit numbers */
+ n |= n >> 32; /* handle 64 bit numbers */
+ n++;
+
+ return n;
+}
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2(uint32_t n)
+{
+ uint32_t log2 = 0;
+
+ while (n >>= 1)
+ log2++;
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2_64(uint64_t n)
+{
+ uint32_t log2 = 0;
+
+ while (n >>= 1)
+ log2++;
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2(uint32_t n)
+{
+ uint32_t log2 = 0;
+
+ if (n == 0)
+ return 0;
+
+ n--; /* Handle powers of 2 */
+
+ while (n)
+ {
+ log2++;
+ n >>= 1;
+ }
+
+ return log2;
+}
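+
+/* Example values (illustrative): FloorLog2(16) == CeilLog2(16) == 4, while
+ * FloorLog2(24) == 4 and CeilLog2(24) == 5. Both helpers return 0 for
+ * n == 1 as well as for n == 0.
+ */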
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2_64(uint64_t n)
+{
+ uint32_t log2 = 0;
+
+ if (n == 0)
+ return 0;
+
+ n--; /* Handle powers of 2 */
+
+ while (n)
+ {
+ log2++;
+ n >>= 1;
+ }
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2(uint32_t n)
+{
+ static const uint32_t b[] =
+ { 0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000};
+ uint32_t r = (n & b[0]) != 0;
+
+ r |= (uint32_t) ((n & b[4]) != 0) << 4;
+ r |= (uint32_t) ((n & b[3]) != 0) << 3;
+ r |= (uint32_t) ((n & b[2]) != 0) << 2;
+ r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+ return r;
+}
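+
+/* Worked example of the mask-and-accumulate approach above (illustrative):
+ * for n == 0x80 (bit 7 set), masks b[0], b[1] and b[2] match while b[3] and
+ * b[4] do not, so r accumulates 1 + 2 + 4 = 7, i.e. log2(0x80). The result
+ * is only meaningful when n is a power of two.
+ */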
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2_64(uint64_t n)
+{
+ static const uint64_t b[] =
+ { 0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
+ 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
+ 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL };
+ uint32_t r = (n & b[0]) != 0;
+
+ r |= (uint32_t) ((n & b[5]) != 0) << 5;
+ r |= (uint32_t) ((n & b[4]) != 0) << 4;
+ r |= (uint32_t) ((n & b[3]) != 0) << 3;
+ r |= (uint32_t) ((n & b[2]) != 0) << 2;
+ r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+ return r;
+}
+
+#endif /* LOG2_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debug driver main file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <drm/drmP.h>
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+/* Outward temp buffer used by the IOCTL handler; allocated once and grown as
+ * needed. This optimisation means the debug driver performs fewer
+ * vmalloc/vfree calls, reducing the chance of kernel vmalloc space
+ * exhaustion. The single output buffer used for PDump UM reads is not
+ * multi-thread safe, so a mutex protects it from simultaneous reads.
+ */
+static IMG_CHAR *g_outTmpBuf = NULL;
+static IMG_UINT32 g_outTmpBufSize = 64*PAGE_SIZE;
+static void *g_pvOutTmpBufMutex = NULL;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+void DBGDrvGetServiceTable(void **fn_table)
+{
+ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+ *fn_table = &g_sDBGKMServices;
+}
+
+void dbgdrv_cleanup(void)
+{
+ if (g_outTmpBuf)
+ {
+ vfree(g_outTmpBuf);
+ g_outTmpBuf = NULL;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ HostDestroyEventObjects();
+#endif
+ HostDestroyMutex(g_pvOutTmpBufMutex);
+ HostDestroyMutex(g_pvAPIMutex);
+ return;
+}
+
+IMG_INT dbgdrv_init(void)
+{
+ /* Init API mutex */
+ if ((g_pvAPIMutex=HostCreateMutex()) == NULL)
+ {
+ return -ENOMEM;
+ }
+
+ /* Init TmpBuf mutex */
+ if ((g_pvOutTmpBufMutex=HostCreateMutex()) == NULL)
+ {
+ return -ENOMEM;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ /*
+ * The current implementation of HostCreateEventObjects on Linux
+ * can never fail, so there is no need to check for error.
+ */
+ (void) HostCreateEventObjects();
+#endif
+
+ return 0;
+}
+
+static IMG_INT dbgdrv_ioctl_work(void *arg, IMG_BOOL bCompat)
+{
+ struct drm_pvr_dbgdrv_cmd *psDbgdrvCmd = (struct drm_pvr_dbgdrv_cmd *) arg;
+ char *buffer, *in, *out;
+ unsigned int cmd;
+ void *pBufferIn, *pBufferOut;
+
+ if (psDbgdrvCmd->pad)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid pad value\n"));
+ return -EINVAL;
+ }
+
+ if ((psDbgdrvCmd->in_data_size > (PAGE_SIZE >> 1)) ||
+ (psDbgdrvCmd->out_data_size > (PAGE_SIZE >> 1)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
+ return -EINVAL;
+ }
+
+ buffer = (char *) HostPageablePageAlloc(1);
+ if (!buffer)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
+ return -EFAULT;
+ }
+
+ in = buffer;
+ out = buffer + (PAGE_SIZE >> 1);
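+
+ /* The scratch page is split in half: the lower half holds the copied-in
+ * parameters and the upper half receives the handler's output, which is
+ * why both user buffers are limited to PAGE_SIZE/2 above. */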
+
+ pBufferIn = (void *)(uintptr_t) psDbgdrvCmd->in_data_ptr;
+ pBufferOut = (void *)(uintptr_t) psDbgdrvCmd->out_data_ptr;
+
+ if (pvr_copy_from_user(in, pBufferIn, psDbgdrvCmd->in_data_size) != 0)
+ {
+ goto init_failed;
+ }
+
+ /* Extra -1 because ioctls start at DEBUG_SERVICE_IOCTL_BASE + 1 */
+ cmd = MAKEIOCTLINDEX(psDbgdrvCmd->cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;
+
+ if (psDbgdrvCmd->cmd == DEBUG_SERVICE_READ)
+ {
+ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
+ DBG_OUT_READ *psReadOutParams = (DBG_OUT_READ *)out;
+ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+ void *pvOutBuffer;
+ PDBG_STREAM psStream;
+
+ psStream = SID2PStream(psReadInParams->hStream);
+ if (!psStream)
+ {
+ goto init_failed;
+ }
+
+ /* Serialise IOCTL Read op access to the singular output buffer */
+ HostAquireMutex(g_pvOutTmpBufMutex);
+
+ if ((g_outTmpBuf == NULL) || (psReadInParams->ui32OutBufferSize > g_outTmpBufSize))
+ {
+ if (psReadInParams->ui32OutBufferSize > g_outTmpBufSize)
+ {
+ g_outTmpBufSize = psReadInParams->ui32OutBufferSize;
+ }
+ /* Free any previously allocated, smaller buffer before growing it */
+ if (g_outTmpBuf)
+ {
+ vfree(g_outTmpBuf);
+ }
+ g_outTmpBuf = vmalloc(g_outTmpBufSize);
+ if (!g_outTmpBuf)
+ {
+ HostReleaseMutex(g_pvOutTmpBufMutex);
+ goto init_failed;
+ }
+ }
+
+ /* Ensure only one thread is allowed into the DBGDriv core at a time */
+ HostAquireMutex(g_pvAPIMutex);
+
+ psReadOutParams->ui32DataRead = DBGDrivRead(psStream,
+ psReadInParams->ui32BufID,
+ psReadInParams->ui32OutBufferSize,
+ g_outTmpBuf);
+ psReadOutParams->ui32SplitMarker = DBGDrivGetMarker(psStream);
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ pvOutBuffer = WIDEPTR_GET_PTR(psReadInParams->pui8OutBuffer, bCompat);
+
+ if (pvr_copy_to_user(pvOutBuffer,
+ g_outTmpBuf,
+ *pui32BytesCopied) != 0)
+ {
+ HostReleaseMutex(g_pvOutTmpBufMutex);
+ goto init_failed;
+ }
+
+ HostReleaseMutex(g_pvOutTmpBufMutex);
+
+ }
+ else
+ {
+ (g_DBGDrivProc[cmd])(in, out, bCompat);
+ }
+
+ if (copy_to_user(pBufferOut, out, psDbgdrvCmd->out_data_size) != 0)
+ {
+ goto init_failed;
+ }
+
+ HostPageablePageFree((void *)buffer);
+ return 0;
+
+init_failed:
+ HostPageablePageFree((void *)buffer);
+ return -EFAULT;
+}
+
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
+{
+ return dbgdrv_ioctl_work((void *) arg, IMG_FALSE);
+}
+
+int dbgdrv_ioctl_compat(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+{
+ return dbgdrv_ioctl_work((void *) arg, IMG_TRUE);
+}
+
+EXPORT_SYMBOL(DBGDrvGetServiceTable);
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Memory manipulation functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory related functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This workaround is only *required* on ARM64. Avoid building or including
+ * it by default on other architectures, unless the 'safe memcpy' test flag
+ * is enabled. (The code should work on other architectures.)
+ */
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
+ * by the compiler to stdlib functions, and it must only use the below
+ * headers. Do not include any IMG or services headers in this file.
+ */
+#include <stddef.h>
+
+/* Prototypes to suppress warnings in -ffreestanding mode */
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize);
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize);
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its requirement on __attribute__((vector_size(n))), typeof() and
+ * __SIZEOF__ macros.
+ */
+#if defined(__GNUC__)
+
+#define MIN(a, b) \
+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;})
+
+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__
+#endif
+#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2"
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4"
+#endif
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error No support for architectures where void* and long are sized differently
+#endif
+
+#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+/* Meaningless, and harder to do correctly */
+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long)
+typedef unsigned long block_t;
+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+typedef unsigned int block_t
+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)));
+# if defined(__arm64__) || defined(__aarch64__)
+# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "w"
+# define REGCL "w"
+# define BVCLB "r"
+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "x"
+# define REGCL "x"
+# define BVCLB "r"
+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+# if defined(__ARM_NEON_FP)
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "q"
+# define REGCL "v"
+# define BVCLB "w"
+# endif
+# endif
+# if defined(DEVICE_MEMSETCPY_ARM64)
+# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL)
+# define NSHLD() __asm__ ("dmb nshld")
+# define NSHST() __asm__ ("dmb nshst")
+# define LDP "ldnp"
+# define STP "stnp"
+# else
+# define NSHLD()
+# define NSHST()
+# define LDP "ldp"
+# define STP "stp"
+# endif
+ typedef unsigned int block_half_t
+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2)));
+# endif
+# endif
+#endif
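+
+/* Note on block_t (describing the typedefs above): with the default
+ * alignment of __SIZEOF_LONG__ (8 on LP64 targets), block_t is an 8-byte GCC
+ * vector of two unsigned ints moved per loop iteration below; with
+ * DEVICE_MEMSETCPY_ALIGN_IN_BYTES defined as 16 on AArch64 it becomes a
+ * 16-byte vector transferred with a single ldp/stp pair.
+ */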
+
+__attribute__((visibility("hidden")))
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+ volatile const char *pcSrc = pvSrc;
+ volatile char *pcDst = pvDst;
+ size_t uPreambleBytes;
+ int bBlockCopy = 0;
+
+ size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t);
+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+ if (!uSrcUnaligned && !uDstUnaligned)
+ {
+ /* Neither pointer is unaligned. Optimal case. */
+ bBlockCopy = 1;
+ }
+ else
+ {
+ if (uSrcUnaligned == sizeof(int) && uDstUnaligned == sizeof(int))
+ {
+ /* Both pointers are at least 32-bit aligned, and we assume that
+ * the processor can handle naturally aligned 32-bit loads and stores.
+ * NOTE: Could we optimize this with a non-temporal version?
+ */
+ if (uSize >= sizeof(int))
+ {
+ volatile int *piSrc = (int *)pcSrc;
+ volatile int *piDst = (int *)pcDst;
+
+ while (uSize >= sizeof(int))
+ {
+ *piDst++ = *piSrc++;
+ uSize -= sizeof(int);
+ }
+
+ pcSrc = (char *)piSrc;
+ pcDst = (char *)piDst;
+ }
+ }
+ else if (uSrcUnaligned == uDstUnaligned)
+ {
+ /* Neither pointer is usefully aligned, but they are misaligned in
+ * the same way, so we can copy a preamble in a slow way, then
+ * optimize the rest.
+ */
+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+ uSize -= uPreambleBytes;
+ while (uPreambleBytes)
+ {
+ *pcDst++ = *pcSrc++;
+ uPreambleBytes--;
+ }
+
+ bBlockCopy = 1;
+ }
+ }
+
+ if (bBlockCopy && uSize >= sizeof(block_t))
+ {
+ volatile block_t *pSrc = (block_t *)pcSrc;
+ volatile block_t *pDst = (block_t *)pcDst;
+
+ NSHLD();
+
+ while (uSize >= sizeof(block_t))
+ {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t"
+ STP " " REGSZ "0, " REGSZ "1, [%[pDst]]"
+ :
+ : [pSrc] "r" (pSrc), [pDst] "r" (pDst)
+ : "memory", REGCL "0", REGCL "1");
+#else
+ *pDst = *pSrc;
+#endif
+ pDst++;
+ pSrc++;
+ uSize -= sizeof(block_t);
+ }
+
+ NSHST();
+
+ pcSrc = (char *)pSrc;
+ pcDst = (char *)pDst;
+ }
+
+ while (uSize)
+ {
+ *pcDst++ = *pcSrc++;
+ uSize--;
+ }
+}
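+
+/* Illustrative usage sketch (buffer names are assumptions): routines like
+ * these are typically substituted for memcpy()/memset() when the destination
+ * is an uncached or write-combined device mapping, where the unaligned or
+ * overlapping accesses made by optimised libc/kernel implementations can be
+ * unsafe.
+ *
+ *   DeviceMemCopy(pvWriteCombinedDst, pvCachedSrc, uiNumBytes);
+ *   DeviceMemSet(pvWriteCombinedDst, 0, uiNumBytes);
+ */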
+
+__attribute__((visibility("hidden")))
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+ volatile char *pcDst = pvDst;
+ size_t uPreambleBytes;
+
+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+ if (uDstUnaligned)
+ {
+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+ uSize -= uPreambleBytes;
+ while (uPreambleBytes)
+ {
+ *pcDst++ = ui8Value;
+ uPreambleBytes--;
+ }
+ }
+
+ if (uSize >= sizeof(block_t))
+ {
+ volatile block_t *pDst = (block_t *)pcDst;
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ block_half_t bValue;
+#else
+ block_t bValue;
+#endif
+ size_t i;
+
+ for (i = 0; i < sizeof(bValue) / sizeof(unsigned int); i++)
+ bValue[i] = ui8Value << 24U |
+ ui8Value << 16U |
+ ui8Value << 8U |
+ ui8Value;
+
+ NSHLD();
+
+ while (uSize >= sizeof(block_t))
+ {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]"
+ :
+ : [bValue] BVCLB (bValue), [pDst] "r" (pDst)
+ : "memory");
+#else
+ *pDst = bValue;
+#endif
+ pDst++;
+ uSize -= sizeof(block_t);
+ }
+
+ NSHST();
+
+ pcDst = (char *)pDst;
+ }
+
+ while (uSize)
+ {
+ *pcDst++ = ui8Value;
+ uSize--;
+ }
+}
+
+#else /* !defined(__GNUC__) */
+
+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */
+
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+ volatile const char *pcSrc = pvSrc;
+ volatile char *pcDst = pvDst;
+
+ while (uSize)
+ {
+ *pcDst++ = *pcSrc++;
+ uSize--;
+ }
+}
+
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+ volatile char *pcDst = pvDst;
+
+ while (uSize)
+ {
+ *pcDst++ = ui8Value;
+ uSize--;
+ }
+}
+
+#endif /* !defined(__GNUC__) */
+
+#endif /* defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common memory management definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common memory management definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MM_COMMON_H
+#define MM_COMMON_H
+
+#define DEVICEMEM_HISTORY_TEXT_BUFSZ 40
+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF
+
+#endif
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "devicemem_server_utils.h"
+
+/* Our own interface */
+#include "mmu_common.h"
+
+#include "rgx_bvnc_defs_km.h"
+/*
+Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+ +-----------+
+ | devicemem |
+ +-----------+
+ |
+ +============+
+ | mmu_common |
+ +============+
+ |
+ +-----------------+
+ | |
+ +---------+ +----------+
+ | pmr | | device |
+ +---------+ +----------+
+*/
+
+#include "img_types.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#if defined(PDUMP)
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#endif
+#include "pmr.h"
+/* include/ */
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+#include "dllist.h"
+
+// #define MMU_OBJECT_REFCOUNT_DEBUGING 1
+#if defined (MMU_OBJECT_REFCOUNT_DEBUGING)
+#define MMU_OBJ_DBG(x) PVR_DPF(x);
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+typedef IMG_UINT32 MMU_FLAGS_T;
+
+typedef enum _MMU_MOD_
+{
+ MMU_MOD_UNKNOWN = 0,
+ MMU_MOD_MAP,
+ MMU_MOD_UNMAP,
+} MMU_MOD;
+
+
+/*!
+ * Refcounted structure that is shared between the context and
+ * the cleanup thread items.
+ * It is used to keep track of all cleanup items and whether the creating
+ * MMU context has been destroyed and therefore is not allowed to be
+ * accessed anymore.
+ *
+ * The cleanup thread is used to defer the freeing of the page tables
+ * because we have to make sure that the MMU cache has been invalidated.
+ * If we don't take care of this the MMU might partially access cached
+ * and uncached tables which might lead to inconsistencies and in the
+ * worst case to MMU pending faults on random memory.
+ */
+typedef struct _MMU_CTX_CLEANUP_DATA_
+{
+ /*! Refcount to know when this structure can be destroyed */
+ IMG_UINT32 uiRef;
+ /*! Protect items in this structure, especially the refcount */
+ POS_LOCK hCleanupLock;
+ /*! List of all cleanup items currently in flight */
+ DLLIST_NODE sMMUCtxCleanupItemsHead;
+ /*! Was the MMU context destroyed and should not be accessed anymore? */
+ IMG_BOOL bMMUContextExists;
+} MMU_CTX_CLEANUP_DATA;
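+
+/* Lifetime note for MMU_CTX_CLEANUP_DATA: every deferred cleanup item created
+ * in _SetupCleanup_FreeMMUMapping() takes a reference (uiRef) on this
+ * structure and _CleanupThread_FreeMMUMapping() drops it once the item has
+ * been processed; the holder that drops uiRef to zero destroys hCleanupLock
+ * and frees the structure, which is how it can safely outlive the MMU context
+ * that created it.
+ */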
+
+
+/*!
+ * Structure holding one or more page tables that need to be
+ * freed after the MMU cache has been flushed which is signalled when
+ * the stored sync has a value that is <= the required value.
+ */
+typedef struct _MMU_CLEANUP_ITEM_
+{
+ /*! Cleanup thread data */
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+ /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */
+ DLLIST_NODE sMMUMappingHead;
+ /*! Node of the cleanup item list for the context */
+ DLLIST_NODE sMMUCtxCleanupItem;
+ /* Pointer to the cleanup meta data */
+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData;
+ /* Sync to query if the MMU cache was flushed */
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ /*! The update value of the sync to signal that the cache was flushed */
+ IMG_UINT32 uiRequiredSyncVal;
+ /*! The device node needed to free the page tables */
+ PVRSRV_DEVICE_NODE *psDevNode;
+} MMU_CLEANUP_ITEM;
+
+/*!
+ All physical allocations and frees are relative to this context, so
+ we would get all the allocations of PCs, PDs, and PTs from the same
+ RA.
+
+ We have one per MMU context in case we have mixed UMA/LMA devices
+ within the same system.
+*/
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+ /*! Parent device node */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /*! Refcount so we know when to free up the arena */
+ IMG_UINT32 uiNumAllocations;
+
+ /*! Arena from which physical memory is derived */
+ RA_ARENA *psPhysMemRA;
+ /*! Arena name */
+ IMG_CHAR *pszPhysMemRAName;
+ /*! Size of arena name string */
+ size_t uiPhysMemRANameAllocSize;
+
+ /*! Meta data for deferred cleanup */
+ MMU_CTX_CLEANUP_DATA *psCleanupData;
+ /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
+ DLLIST_NODE sTmpMMUMappingHead;
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+ Mapping structure for MMU memory allocation
+*/
+typedef struct _MMU_MEMORY_MAPPING_
+{
+ /*! Physmem context to allocate from */
+ MMU_PHYSMEM_CONTEXT *psContext;
+ /*! OS/system Handle for this allocation */
+ PG_HANDLE sMemHandle;
+ /*! CPU virtual address of this allocation */
+ void *pvCpuVAddr;
+ /*! Device physical address of this allocation */
+ IMG_DEV_PHYADDR sDevPAddr;
+ /*! Size of this allocation */
+ size_t uiSize;
+ /*! Number of current mappings of this allocation */
+ IMG_UINT32 uiCpuVAddrRefCount;
+ /*! Node for the defer free list */
+ DLLIST_NODE sMMUMappingItem;
+} MMU_MEMORY_MAPPING;
+
+/*!
+ Memory descriptor for MMU objects. There can be more than one memory
+ descriptor per MMU memory allocation.
+*/
+typedef struct _MMU_MEMORY_DESC_
+{
+ /* NB: bValid is set if this descriptor describes physical
+ memory. This allows "empty" descriptors to exist, such that we
+ can allocate them in batches. */
+ /*! Does this MMU object have physical backing */
+ IMG_BOOL bValid;
+ /*! Device Physical address of physical backing */
+ IMG_DEV_PHYADDR sDevPAddr;
+ /*! CPU virtual address of physical backing */
+ void *pvCpuVAddr;
+ /*! Mapping data for this MMU object */
+ MMU_MEMORY_MAPPING *psMapping;
+ /*! Memdesc offset into the psMapping */
+ IMG_UINT32 uiOffset;
+ /*! Size of the Memdesc */
+ IMG_UINT32 uiSize;
+} MMU_MEMORY_DESC;
+
+/*!
+ MMU levelx structure. This is generic and is used
+ for all levels (PC, PD, PT).
+*/
+typedef struct _MMU_Levelx_INFO_
+{
+ /*! The Number of entries in this level */
+ IMG_UINT32 ui32NumOfEntries;
+
+ /*! Number of times this level has been referenced. Note: For Level1 (PTE)
+ we still take/drop the reference when setting up the page tables rather
+ than at map/unmap time as this simplifies things */
+ IMG_UINT32 ui32RefCount;
+
+ /*! MemDesc for this level */
+ MMU_MEMORY_DESC sMemDesc;
+
+ /*! Array of infos for the next level. Must be last member in structure */
+ struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+ MMU context structure
+*/
+struct _MMU_CONTEXT_
+{
+ /*! Parent device node */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ MMU_DEVICEATTRIBS *psDevAttrs;
+
+ /*! For allocation and deallocation of the physical memory where
+ the pagetables live */
+ struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+ /*! PDump context ID (required for PDump commands with virtual addresses) */
+ IMG_UINT32 uiPDumpContextID;
+
+ /*! The refcount of the PDump context ID */
+ IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+ /*! Data that is passed back during device specific callbacks */
+ IMG_HANDLE hDevData;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+#endif
+
+ /*! Lock to ensure exclusive access when manipulating the MMU context or
+ * reading and using its content
+ */
+ POS_LOCK hLock;
+
+ /*! Base level info structure. Must be last member in structure */
+ MMU_Levelx_INFO sBaseLevelInfo;
+ /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */
+};
+
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+#include "log2.h"
+#endif
+
+
+/*****************************************************************************
+ * Utility functions *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _FreeMMUMapping
+
+@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables
+ they represent.
+
+@Input psDevNode Device node
+
+@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free
+*/
+/*****************************************************************************/
+static void
+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+ PDLLIST_NODE psTmpMMUMappingHead)
+{
+ PDLLIST_NODE psNode, psNextNode;
+
+ /* Free the current list unconditionally */
+ dllist_foreach_node(psTmpMMUMappingHead,
+ psNode,
+ psNextNode)
+ {
+ MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode,
+ MMU_MEMORY_MAPPING,
+ sMMUMappingItem);
+
+ psDevNode->pfnDevPxFree(psDevNode, &psMapping->sMemHandle);
+ dllist_remove_node(psNode);
+ OSFreeMem(psMapping);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _CleanupThread_FreeMMUMapping
+
+@Description Function to be executed by the cleanup thread to free
+ MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated.
+
+ This function will request a MMU cache invalidate once and
+ retry to free the MMU_MEMORY_MAPPINGs until the invalidate
+ has been executed.
+
+ If the memory context that created this cleanup item has been
+ destroyed in the meantime this function will directly free the
+ MMU_MEMORY_MAPPINGs without waiting for any MMU cache
+ invalidation.
+
+@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM
+
+@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR
+_CleanupThread_FreeMMUMapping(void* pvData)
+{
+ PVRSRV_ERROR eError;
+ MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *) pvData;
+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData;
+ PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode;
+ IMG_BOOL bFreeNow;
+ IMG_UINT32 uiSyncCurrent;
+ IMG_UINT32 uiSyncReq;
+
+ OSLockAcquire(psMMUCtxCleanupData->hCleanupLock);
+
+ /* Don't attempt to free anything when the context has been destroyed.
+ * Especially don't access any device specific structures anymore!*/
+ if (!psMMUCtxCleanupData->bMMUContextExists)
+ {
+ OSFreeMem(psCleanup);
+ eError = PVRSRV_OK;
+ goto e0;
+ }
+
+ if (psCleanup->psSync == NULL)
+ {
+ /* Kick to invalidate the MMU caches and get sync info */
+ psDevNode->pfnMMUCacheInvalidateKick(psDevNode,
+ &psCleanup->uiRequiredSyncVal,
+ IMG_TRUE);
+ psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+ }
+
+ uiSyncCurrent = *(psCleanup->psSync->pui32LinAddr);
+ uiSyncReq = psCleanup->uiRequiredSyncVal;
+
+ /* Either the invalidate has been executed ... */
+ bFreeNow = (uiSyncCurrent >= uiSyncReq) ? IMG_TRUE :
+ /* ... with the counter wrapped around ... */
+ (uiSyncReq - uiSyncCurrent) > 0xEFFFFFFFUL ? IMG_TRUE :
+ /* ... or are we still waiting for the invalidate? */
+ IMG_FALSE;
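+
+ /* Worked example of the wrap-around clause above (illustrative values):
+ * with uiSyncReq == 0xFFFFFFFE and uiSyncCurrent == 0x00000002 the plain
+ * comparison fails, but uiSyncReq - uiSyncCurrent == 0xFFFFFFFC, which
+ * exceeds 0xEFFFFFFF, so the counter is assumed to have wrapped past the
+ * required value and the tables can be freed. */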
+
+#if defined(NO_HARDWARE)
+ /* In NOHW the syncs will never be updated so just free the tables */
+ bFreeNow = IMG_TRUE;
+#endif
+
+ if (bFreeNow)
+ {
+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+ dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
+ OSFreeMem(psCleanup);
+
+ eError = PVRSRV_OK;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+e0:
+
+ /* If this cleanup task has been successfully executed we can
+ * decrease the context cleanup data refcount. Successful here
+ * means that the MMU_MEMORY_MAPPINGs have been freed either by
+ * this cleanup task or when the MMU context was destroyed. */
+ if (eError == PVRSRV_OK)
+ {
+ IMG_UINT32 uiRef;
+
+ uiRef = --psMMUCtxCleanupData->uiRef;
+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+
+ if (uiRef == 0)
+ {
+ OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
+ OSFreeMem(psMMUCtxCleanupData);
+ }
+ }
+ else
+ {
+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+ }
+
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _SetupCleanup_FreeMMUMapping
+
+@Description Setup a cleanup item for the cleanup thread that will
+ kick off a MMU invalidate request and free the associated
+ MMU_MEMORY_MAPPINGs when the invalidate was successful.
+
+@Input psDevNode Device node
+
+@Input psPhysMemCtx The current MMU physmem context
+*/
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+ MMU_CLEANUP_ITEM *psCleanupItem;
+ MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+
+ if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+ {
+ goto e0;
+ }
+
+#if !defined(SUPPORT_MMU_PENDING_FAULT_PROTECTION)
+ /* If users deactivated this we immediately free the page tables */
+ goto e1;
+#endif
+
+ /* Don't defer the freeing if we are currently unloading the driver
+ * or if the sync has been destroyed */
+ if (PVRSRVGetPVRSRVData()->bUnload ||
+ psDevNode->psMMUCacheSyncPrim == NULL)
+ {
+ goto e1;
+ }
+
+ /* Allocate a cleanup item */
+ psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+ if (!psCleanupItem)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get memory for deferred page table cleanup. "
+ "Freeing tables immediately",
+ __FUNCTION__));
+ goto e1;
+ }
+
+ /* Set sync to NULL to indicate we did not interact with
+ * the FW yet. Kicking off an MMU cache invalidate should
+ * be done in the cleanup thread to not waste time here. */
+ psCleanupItem->psSync = NULL;
+ psCleanupItem->uiRequiredSyncVal = 0;
+ psCleanupItem->psDevNode = psDevNode;
+ psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+ OSLockAcquire(psCleanupData->hCleanupLock);
+
+ psCleanupData->uiRef++;
+
+ /* Move the page tables to be freed over to the cleanup item */
+ dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+ &psCleanupItem->sMMUMappingHead);
+
+ /* Add the cleanup item itself to the context list */
+ dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+ &psCleanupItem->sMMUCtxCleanupItem);
+
+ OSLockRelease(psCleanupData->hCleanupLock);
+
+ /* Setup the cleanup thread data and add the work item */
+ psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+ psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+ psCleanupItem->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+ psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+
+ PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+ return;
+
+e1:
+ /* Free the page tables now */
+ _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+e0:
+ return;
+}
+
+/*************************************************************************/ /*!
+@Function _CalcPCEIdx
+
+@Description Calculate the page catalogue index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page catalogue index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+ >> psDevVAddrConfig->uiPCIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function _CalcPDEIdx
+
+@Description Calculate the page directory index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page directory index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+ >> psDevVAddrConfig->uiPDIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function _CalcPTEIdx
+
+@Description Calculate the page entry index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page entry index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+ sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+ >> psDevVAddrConfig->uiPTIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
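+
+/* Worked example for the three index helpers above (field widths are
+ * illustrative, not taken from a specific device layout): with
+ * uiPTIndexShift == 12, uiPDIndexShift == 21 and uiPCIndexShift == 30
+ * (4KB pages, 512-entry PTs and PDs), a device virtual address decomposes as
+ *
+ *     PCE index = (uiAddr & uiPCIndexMask) >> 30
+ *     PDE index = (uiAddr & uiPDIndexMask) >> 21
+ *     PTE index = (uiAddr & uiPTIndexMask) >> 12
+ *
+ * The helpers simply mask and shift; bRoundUp gives the rounded-up index for
+ * the end address of a range.
+ */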
+
+/*****************************************************************************
+ * MMU memory allocation/management functions (mem desc) *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMem_RAImportAlloc
+
+@Description Imports MMU Px memory into the RA. This is where the
+ actual allocation of physical memory happens.
+
+@Input hArenaHandle Handle that was passed in during the
+ creation of the RA
+
+@Input uiSize Size of the memory to import
+
+@Input uiFlags Flags that were passed in with the allocation.
+
+@Output puiBase The address of where to insert this import
+
+@Output puiActualSize The actual size of the import
+
+@Output phPriv Handle which will be passed back when
+ this import is freed
+
+@Return PVRSRV_OK if import alloc was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+ RA_LENGTH_T uiSize,
+ RA_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phPriv)
+{
+ MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+ MMU_MEMORY_MAPPING *psMapping;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+ psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+ if (psMapping == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ eError = psDevNode->pfnDevPxAlloc(psDevNode, TRUNCATE_64BITS_TO_SIZE_T(uiSize), &psMapping->sMemHandle,
+ &psMapping->sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ psMapping->psContext = psCtx;
+ psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+ psMapping->uiCpuVAddrRefCount = 0;
+
+ *phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+ /* Note: This assumes this memory never gets paged out */
+ *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+ *puiActualSize = uiSize;
+
+ return PVRSRV_OK;
+
+e1:
+ OSFreeMem(psMapping);
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMem_RAImportFree
+
+@Description Frees MMU Px memory that was imported into the RA. The mapping
+ is added to the defer-free list rather than freed immediately.
+
+@Input hArenaHandle Handle that was passed in during the
+ creation of the RA
+
+@Input uiBase Base address of the import being freed
+
+@Input hPriv Private data that the import alloc provided
+
+@Return None
+*/
+/*****************************************************************************/
+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hPriv)
+{
+ MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *) hPriv;
+ MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+
+ PVR_UNREFERENCED_PARAMETER(uiBase);
+
+ /* Check we have dropped all CPU mappings */
+ PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+ /* Add mapping to defer free list */
+ psMapping->psContext = NULL;
+ dllist_add_to_tail(&psCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMemAlloc
+
+@Description Allocates physical memory for MMU objects
+
+@Input psCtx Physmem context to do the allocation from
+
+@Output psMemDesc Allocation description
+
+@Input uiBytes Size of the allocation in bytes
+
+@Input uiAlignment Alignment requirement of this allocation
+
+@Return PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psCtx,
+ MMU_MEMORY_DESC *psMemDesc,
+ size_t uiBytes,
+ size_t uiAlignment)
+{
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiPhysAddr;
+
+ if (!psMemDesc || psMemDesc->bValid)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = RA_Alloc(psCtx->psPhysMemRA,
+ uiBytes,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, // flags
+ uiAlignment,
+ "",
+ &uiPhysAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psMemDesc->psMapping);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_PhysMemAlloc: ERROR call to RA_Alloc() failed"));
+ return eError;
+ }
+
+ psMemDesc->bValid = IMG_TRUE;
+ psMemDesc->pvCpuVAddr = NULL;
+ psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
+
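+ /* Map the whole import into the CPU the first time any allocation
+ from it needs a CPU virtual address */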
+ if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+ {
+ eError = psCtx->psDevNode->pfnDevPxMap(psCtx->psDevNode,
+ &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->psMapping->uiSize,
+ &psMemDesc->psMapping->sDevPAddr,
+ &psMemDesc->psMapping->pvCpuVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ RA_Free(psCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+ return eError;
+ }
+ }
+
+ psMemDesc->psMapping->uiCpuVAddrRefCount++;
+ psMemDesc->pvCpuVAddr = (IMG_UINT8 *) psMemDesc->psMapping->pvCpuVAddr
+ + (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+ psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+ psMemDesc->uiSize = uiBytes;
+ PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMemFree
+
+@Description Frees physical memory for MMU objects
+
+@Input psCtx Physmem context to do the free on
+
+@Input psMemDesc Allocation description
+
+@Return None
+*/
+/*****************************************************************************/
+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psCtx,
+ MMU_MEMORY_DESC *psMemDesc)
+{
+ RA_BASE_T uiPhysAddr;
+
+ PVR_ASSERT(psMemDesc->bValid);
+
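+ /* Unmap the import from the CPU when the last CPU reference is dropped */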
+ if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+ {
+ psCtx->psDevNode->pfnDevPxUnMap(psCtx->psDevNode, &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->psMapping->pvCpuVAddr);
+ }
+
+ psMemDesc->pvCpuVAddr = NULL;
+
+ uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+ RA_Free(psCtx->psPhysMemRA, uiPhysAddr);
+
+ psMemDesc->bValid = IMG_FALSE;
+}
+
+
+/*****************************************************************************
+ * MMU object allocation/management functions *
+ *****************************************************************************/
+
+static INLINE void _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ MMU_PROTFLAGS_T *uiMMUProtFlags,
+ MMU_CONTEXT *psMMUContext)
+{
+ /* Do flag conversion between devmem flags and MMU generic flags */
+ if (bInvalidate == IMG_FALSE)
+ {
+ *uiMMUProtFlags |= ( (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+ >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+ << MMU_PROTFLAGS_DEVICE_OFFSET;
+
+ if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+ }
+ if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+ }
+
+ switch (DevmemDeviceCacheMode(psMMUContext->psDevNode, uiMappingFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+ case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+ break;
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"_MMU_DerivePTProtFlags: Wrong parameters"));
+ return;
+ }
+
+ if (DevmemDeviceCacheCoherency(psMMUContext->psDevNode, uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+ }
+
+ if( (psMMUContext->psDevNode->pfnCheckDeviceFeature) && \
+ psMMUContext->psDevNode->pfnCheckDeviceFeature(psMMUContext->psDevNode, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ /*
+ If we are allocating on the MMU of the firmware processor, the cached/uncached attributes
+ must depend on the FIRMWARE_CACHED allocation flag.
+ */
+ if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+ {
+ if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+ }
+ else
+ {
+ *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
+
+ }
+ *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
+ }
+ }
+ }
+ else
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _PxMemAlloc
+
+@Description Allocates physical memory for MMU objects, initialises
+ and PDumps it.
+
+@Input psMMUContext MMU context
+
+@Input uiNumEntries Number of entries to allocate
+
+@Input psConfig MMU Px config
+
+@Input eMMULevel MMU level that the allocation is for
+
+@Output psMemDesc Description of allocation
+
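+@Input uiLog2Align Log2 of the alignment required for
+ the allocated Px memory
+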
+@Return PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiNumEntries,
+ const MMU_PxE_CONFIG *psConfig,
+ MMU_LEVEL eMMULevel,
+ MMU_MEMORY_DESC *psMemDesc,
+ IMG_UINT32 uiLog2Align)
+{
+ PVRSRV_ERROR eError;
+ size_t uiBytes;
+ size_t uiAlign;
+
+ PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+ uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+ /* We need the alignment of the previous level here because it is that level's entry, pointing at this allocation, that we generate */
+ uiAlign = 1 << uiLog2Align;
+
+ /* allocate the object */
+ eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+ psMemDesc, uiBytes, uiAlign);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to allocate memory for the MMU object"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /*
+ Clear the object
+ Note: if any MMUs are cleared with non-zero values then we will need a
+ custom clear function
+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+ unlikely
+ */
+ OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->uiOffset,
+ psMemDesc->uiSize);
+ if(eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Alloc MMU object");
+
+ PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ eMMULevel,
+ &psMemDesc->sDevPAddr,
+ uiBytes,
+ uiAlign,
+ psMMUContext->psDevAttrs->eMMUType);
+
+ PDumpMMUDumpPxEntries(eMMULevel,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ 0,
+ uiNumEntries,
+ NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
+ psConfig->uiBytesPerEntry,
+ uiLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+ return PVRSRV_OK;
+e1:
+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
+ psMemDesc);
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _PxMemFree
+
+@Description Frees physical memory for MMU objects, de-initialises
+ and PDumps it.
+
+@Input psMemDesc Description of allocation
+
+@Return None
+*/
+/*****************************************************************************/
+
+static void _PxMemFree(MMU_CONTEXT *psMMUContext,
+ MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(MMU_CLEARMEM_ON_FREE)
+ PVRSRV_ERROR eError;
+
+ /*
+ Clear the MMU object
+ Note: if any MMUs are cleared with non-zero values then we will need a
+ custom clear function
+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+ unlikely
+ */
+ OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->ui32Bytes);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Clear MMU object before freeing it");
+#endif
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Free MMU object");
+ {
+ PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ eMMULevel,
+ &psMemDesc->sDevPAddr,
+ psMMUContext->psDevAttrs->eMMUType);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+ /* free the Px memory */
+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 uiIndex,
+ const MMU_PxE_CONFIG *psConfig,
+ const IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bUnmap,
+#if defined(PDUMP)
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+ IMG_UINT64 uiProtFlags)
+{
+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+ IMG_UINT64 ui64PxE64;
+ IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
+
+ if(psMMUContext->psDevNode->pfnCheckDeviceFeature(psMMUContext->psDevNode, \
+ RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ /*
+ * If mapping for the MIPS FW context, check for sensitive PAs
+ */
+ if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs
+ && RGXMIPSFW_SENSITIVE_ADDR(uiAddr))
+ {
+ PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psMMUContext->psDevNode->pvDevice;
+
+ uiAddr = psDevice->sTrampoline.sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(uiAddr);
+ }
+ }
+
+ /* Calculate Entry */
+ ui64PxE64 = uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused bits */
+ ui64PxE64 |= uiProtFlags;
+
+ /* Set the entry */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ pui64Px[uiIndex] = ui64PxE64;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ /* assert that the result fits into 32 bits before writing
+ it into the 32-bit array with a cast */
+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+ }
+ else
+ {
+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ }
+
+
+ /* Log modification */
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, MMU_LEVEL_1,
+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+ !bUnmap);
+
+#if defined (PDUMP)
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ uiIndex,
+ 1,
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _SetupPxE
+
+@Description Sets up an entry of an MMU object to point to the
+ provided address
+
+@Input psMMUContext MMU context to operate on
+
+@Input psLevel Level info for MMU object
+
+@Input uiIndex Index into the MMU object to setup
+
+@Input psConfig MMU Px config
+
+@Input eMMULevel Level of MMU object
+
+@Input psDevPAddr Address to setup the MMU object to point to
+
+@Input pszMemspaceName Name of the PDump memory space that the entry
+ will point to
+
+@Input pszSymbolicAddr PDump symbolic address that the entry will
+ point to
+
+@Input uiProtFlags MMU protection flags
+
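+@Input uiLog2DataPageSize Log2 of the data page size
+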
+@Return PVRSRV_OK if the setup was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 uiIndex,
+ const MMU_PxE_CONFIG *psConfig,
+ MMU_LEVEL eMMULevel,
+ const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+ MMU_FLAGS_T uiProtFlags,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+
+ IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+ IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32);
+
+ if (!psDevPAddr)
+ {
+ /* Invalidate entry */
+ if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+ uiProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+ psDevPAddr = &gsBadDevPhyAddr;
+ }
+ else
+ {
+ if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+ uiProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+ }
+
+ switch(eMMULevel)
+ {
+ case MMU_LEVEL_3:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+ break;
+
+ case MMU_LEVEL_2:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+ break;
+
+ case MMU_LEVEL_1:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* How big is a PxE in bytes? */
+ /* Filling the actual Px entry with an address */
+ switch(psConfig->uiBytesPerEntry)
+ {
+ case 4:
+ {
+ IMG_UINT32 *pui32Px;
+ IMG_UINT64 ui64PxE64;
+
+ pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused higher bits */
+
+ ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+ /* assert that the result fits into 32 bits before writing
+ it into the 32-bit array with a cast */
+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+ /* We should never invalidate an invalid page */
+ if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+ }
+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, eMMULevel,
+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+ break;
+ }
+ case 8:
+ {
+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused higher bits */
+ pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, eMMULevel,
+ HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+ __func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ }
+
+#if defined (PDUMP)
+ PDumpMMUDumpPxEntries(eMMULevel,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ uiIndex,
+ 1,
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+ psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext->hDevData,
+ eMMULevel,
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)?IMG_TRUE:IMG_FALSE);
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ * MMU host control functions (Level Info) *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function _MMU_FreeLevel
+
+@Description Recursively frees the specified range of Px entries. If any
+ level has its last reference dropped then the MMU object
+ memory and the MMU_Levelx_Info will be freed.
+
+ At each level we might be crossing a boundary from one Px to
+ another. The values in auiStartArray should be used for
+ the first call into each level and the values in auiEndArray
+ should only be used in the last call for each level.
+ In order to determine if this is the first/last call we pass
+ in bFirst and bLast.
+ When one level calls down to the next, bFirst/bLast is only set
+ for the next recursion if it is set at this level and this is
+ the first/last iteration of the loop at this level.
+ This gives each iteration the knowledge of the level above
+ that it requires.
+
+@Input psMMUContext MMU context to operate on
+
+@Input psLevel Level info on which to free the
+ specified range
+
+@Input auiStartArray Array of start indexes (one for each level)
+
+@Input auiEndArray Array of end indexes (one for each level)
+
+@Input auiEntriesPerPxArray Array of number of entries for the Px
+ (one for each level)
+
+@Input apsConfig Array of PxE configs (one for each level)
+
+@Input aeMMULevel Array of MMU levels (one for each level)
+
+@Input pui32CurrentLevel Pointer to a variable which is set to our
+ current level
+
+@Input uiStartIndex Start index of the range to free
+
+@Input uiEndIndex End index of the range to free
+
+@Input bFirst This is the first call for this level
+
+@Input bLast This is the last call for this level
+
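+@Input uiLog2DataPageSize Log2 of the data page size
+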
+@Return IMG_TRUE if the last reference to psLevel was dropped
+*/
+/*****************************************************************************/
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPxArray[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ IMG_UINT32 *pui32CurrentLevel,
+ IMG_UINT32 uiStartIndex,
+ IMG_UINT32 uiEndIndex,
+ IMG_BOOL bFirst,
+ IMG_BOOL bLast,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+ IMG_UINT32 i;
+ IMG_BOOL bFreed = IMG_FALSE;
+
+ /* Sanity check */
+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+ PVR_ASSERT(psLevel != NULL);
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+ aeMMULevel[uiThisLevel], uiStartIndex,
+ uiEndIndex, psLevel->ui32RefCount));
+
+ for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++)
+ {
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ /* Recurse into the next level */
+ (*pui32CurrentLevel)++;
+ if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+ auiEndArray, auiEntriesPerPxArray,
+ apsConfig, aeMMULevel, pui32CurrentLevel,
+ uiNextStartIndex, uiNextEndIndex,
+ bNextFirst, bNextLast, uiLog2DataPageSize))
+ {
+ PVRSRV_ERROR eError;
+
+ /* Un-wire the entry */
+ eError = _SetupPxE(psMMUContext,
+ psLevel,
+ i,
+ psConfig,
+ aeMMULevel[uiThisLevel],
+ NULL,
+#if defined(PDUMP)
+ NULL, /* Only required for data page */
+ NULL, /* Only required for data page */
+ 0, /* Only required for data page */
+#endif
+ MMU_PROTFLAGS_INVALID,
+ uiLog2DataPageSize);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Free table of the level below, pointed to by this table entry.
+ * We don't destroy the table inside the above _MMU_FreeLevel call because we
+ * first have to set the table entry of the level above to invalid. */
+ _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
+ OSFreeMem(psNextLevel);
+
+ /* The level below us is empty, drop the refcount and clear the pointer */
+ psLevel->ui32RefCount--;
+ psLevel->apsNextLevel[i] = NULL;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+ (*pui32CurrentLevel)--;
+ }
+ else
+ {
+ psLevel->ui32RefCount--;
+ }
+
+ /*
+ Free this level if it is no longer referenced, unless it's the base
+ level in which case it's part of the MMU context and should be freed
+ when the MMU context is freed
+ */
+ if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+ {
+ bFreed = IMG_TRUE;
+ }
+ }
+
+ /* Level one flushing is done when we actually write the table entries */
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+ }
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+ aeMMULevel[uiThisLevel], bFreed?0:psLevel->ui32RefCount));
+
+ return bFreed;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_AllocLevel
+
+@Description Recursively allocates the specified range of Px entries. If any
+ level does not yet exist then the MMU object memory and the
+ MMU_Levelx_INFO will be allocated and wired in.
+
+ At each level we might be crossing a boundary from one Px to
+ another. The values in auiStartArray should be used for
+ the first call into each level and the values in auiEndArray
+ should only be used in the last call for each level.
+ In order to determine if this is the first/last call we pass
+ in bFirst and bLast.
+ When one level calls down to the next, bFirst/bLast is only set
+ for the next recursion if it is set at this level and this is
+ the first/last iteration of the loop at this level.
+ This gives each iteration the knowledge of the level above
+ that it requires.
+
+@Input psMMUContext MMU context to operate on
+
+@Input psLevel Level info on which to allocate the
+ specified range
+
+@Input auiStartArray Array of start indexes (one for each level)
+
+@Input auiEndArray Array of end indexes (one for each level)
+
+@Input auiEntriesPerPxArray Array of number of entries for the Px
+ (one for each level)
+
+@Input apsConfig Array of PxE configs (one for each level)
+
+@Input aeMMULevel Array of MMU levels (one for each level)
+
+@Input pui32CurrentLevel Pointer to a variable which is set to our
+ current level
+
+@Input uiStartIndex Start index of the range to allocate
+
+@Input uiEndIndex End index of the range to allocate
+
+@Input bFirst This is the first call for this level
+
+@Input bLast This is the last call for this level
+
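+@Input uiLog2DataPageSize Log2 of the data page size
+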
+@Return PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPxArray[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ IMG_UINT32 *pui32CurrentLevel,
+ IMG_UINT32 uiStartIndex,
+ IMG_UINT32 uiEndIndex,
+ IMG_BOOL bFirst,
+ IMG_BOOL bLast,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+ PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
+ IMG_UINT32 i;
+
+ /* Sanity check */
+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+ aeMMULevel[uiThisLevel], uiStartIndex,
+ uiEndIndex, psLevel->ui32RefCount));
+
+ /* Go from uiStartIndex to uiEndIndex through the Px */
+ for (i = uiStartIndex;i < uiEndIndex;i++)
+ {
+ /* Only try an allocation if this is not the last level, because the PT
+ allocation has already been done when the entry in the PD was set up */
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ /* If there is already a next Px level existing, do not allocate it */
+ if (!psLevel->apsNextLevel[i])
+ {
+ MMU_Levelx_INFO *psNextLevel;
+ IMG_UINT32 ui32AllocSize;
+ IMG_UINT32 uiNextEntries;
+
+ /* Allocate and setup the next level */
+ uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+ ui32AllocSize = sizeof(MMU_Levelx_INFO);
+ if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+ {
+ ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+ }
+ psNextLevel = OSAllocZMem(ui32AllocSize);
+ if (psNextLevel == NULL)
+ {
+ uiAllocState = 0;
+ goto e0;
+ }
+
+ /* Hook in this level for next time */
+ psLevel->apsNextLevel[i] = psNextLevel;
+
+ psNextLevel->ui32NumOfEntries = uiNextEntries;
+ psNextLevel->ui32RefCount = 0;
+ /* Allocate Px memory for a sub level */
+ eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+ aeMMULevel[uiThisLevel + 1],
+ &psNextLevel->sMemDesc,
+ psConfig->uiAddrLog2Align);
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 1;
+ goto e0;
+ }
+
+ /* Wire up the entry */
+ eError = _SetupPxE(psMMUContext,
+ psLevel,
+ i,
+ psConfig,
+ aeMMULevel[uiThisLevel],
+ &psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+ NULL, /* Only required for data page */
+ NULL, /* Only required for data page */
+ 0, /* Only required for data page */
+#endif
+ 0,
+ uiLog2DataPageSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 2;
+ goto e0;
+ }
+
+ psLevel->ui32RefCount++;
+ }
+
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ /* Recurse into the next level */
+ (*pui32CurrentLevel)++;
+ eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+ auiStartArray,
+ auiEndArray,
+ auiEntriesPerPxArray,
+ apsConfig,
+ aeMMULevel,
+ pui32CurrentLevel,
+ uiNextStartIndex,
+ uiNextEndIndex,
+ bNextFirst,
+ bNextLast,
+ uiLog2DataPageSize);
+ (*pui32CurrentLevel)--;
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 2;
+ goto e0;
+ }
+ }
+ else
+ {
+ /* All we need to do for level 1 is bump the refcount */
+ psLevel->ui32RefCount++;
+ }
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+
+ /* Level one flushing is done when we actually write the table entries */
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e0;
+ }
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+ aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+ return PVRSRV_OK;
+
+e0:
+ /* Sanity check that we've not come down this route unexpectedly */
+ PVR_ASSERT(uiAllocState!=99);
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d"
+ ,eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+ /* The start value of the index variable i is deliberately not re-initialised:
+ this loop undoes, in reverse order, what was initialised just before the
+ failure, so i already holds the right value. */
+ for (/* i already set */ ; i>= uiStartIndex && i< uiEndIndex; i--)
+ {
+ switch(uiAllocState)
+ {
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ case 3:
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ (*pui32CurrentLevel)++;
+ if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+ auiStartArray, auiEndArray,
+ auiEntriesPerPxArray, apsConfig,
+ aeMMULevel, pui32CurrentLevel,
+ uiNextStartIndex, uiNextEndIndex,
+ bNextFirst, bNextLast, uiLog2DataPageSize))
+ {
+ psLevel->ui32RefCount--;
+ psLevel->apsNextLevel[i] = NULL;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+ (*pui32CurrentLevel)--;
+ }
+ else
+ {
+ /* We should never come down this path, but it's here
+ for completeness */
+ psLevel->ui32RefCount--;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
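+ /* Fall through */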
+ case 2:
+ if (psLevel->apsNextLevel[i] != NULL &&
+ psLevel->apsNextLevel[i]->ui32RefCount == 0)
+ {
+ _PxMemFree(psMMUContext, &psLevel->sMemDesc,
+ aeMMULevel[uiThisLevel]);
+ }
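+ /* Fall through */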
+ case 1:
+ if (psLevel->apsNextLevel[i] != NULL &&
+ psLevel->apsNextLevel[i]->ui32RefCount == 0)
+ {
+ OSFreeMem(psLevel->apsNextLevel[i]);
+ psLevel->apsNextLevel[i] = NULL;
+ }
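+ /* Fall through */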
+ case 0:
+ uiAllocState = 3;
+ break;
+ }
+ }
+ return eError;
+}
+
+/*****************************************************************************
+ * MMU page table functions *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _MMU_GetLevelData
+
+@Description Gets all the level data and calculates the indexes for the
+ specified address range
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Log2 of the page size to use
+
+@Input auiStartArray Array of start indexes (one for each level)
+
+@Input auiEndArray Array of end indexes (one for each level)
+
+@Input uiEntriesPerPxArray Array of number of entries for the Px
+ (one for each level)
+
+@Input apsConfig Array of PxE configs (one for each level)
+
+@Input aeMMULevel Array of MMU levels (one for each level)
+
+@Output ppsMMUDevVAddrConfig Device virtual address config
+
+@Output phPriv Private data of page size config
+
+@Return None
+*/
+/*****************************************************************************/
+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPx[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ const MMU_PxE_CONFIG *psMMUPDEConfig;
+ const MMU_PxE_CONFIG *psMMUPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i = 0;
+
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ ppsMMUDevVAddrConfig,
+ phPriv);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+ if (psDevVAddrConfig->uiPCIndexMask != 0)
+ {
+ auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ aeMMULevel[i] = MMU_LEVEL_3;
+ i++;
+ }
+
+ if (psDevVAddrConfig->uiPDIndexMask != 0)
+ {
+ auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
+ if (i == 0)
+ {
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ }
+ else
+ {
+ apsConfig[i] = psMMUPDEConfig;
+ }
+ aeMMULevel[i] = MMU_LEVEL_2;
+ i++;
+ }
+
+ /*
+ There is always a PTE entry so we have a slightly different behaviour from above.
+ E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there
+ is a PT with one entry.
+ */
+ auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ if (psDevVAddrConfig->uiPTIndexMask !=0)
+ {
+ auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ }
+ else
+ {
+ /*
+ If the PTE mask is zero it means there is only 1 PTE and thus
+ the start and end indices are one and the same
+ */
+ auiEndArray[i] = auiStartArray[i];
+ }
+
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
+
+ if (i == 0)
+ {
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ }
+ else
+ {
+ apsConfig[i] = psMMUPTEConfig;
+ }
+ aeMMULevel[i] = MMU_LEVEL_1;
+}
+
+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function _AllocPageTables
+
+@Description Allocate page tables and any higher level MMU objects required
+ for the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Page size of the data pages
+
+@Return PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ IMG_UINT32 ui32CurrentLevel = 0;
+
+
+ PVR_DPF((PVR_DBG_ALLOC,
+ "_AllocPageTables: vaddr range: 0x%010llx:0x%010llx",
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr
+ ));
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Allocating page tables for %llu bytes virtual range: 0x%010llX to 0x%010llX",
+ (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+ (IMG_UINT64)sDevVAddrStart.uiAddr,
+ (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+ (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
+ auiEntriesPerPx, apsConfig, aeMMULevel,
+ &psDevVAddrConfig, &hPriv);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+ eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+ auiStartArray, auiEndArray, auiEntriesPerPx,
+ apsConfig, aeMMULevel, &ui32CurrentLevel,
+ auiStartArray[0], auiEndArray[0],
+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+ _MMU_PutLevelData(psMMUContext, hPriv);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _FreePageTables
+
+@Description Free page tables and any higher level MMU objects that are no
+ longer referenced for the specified virtual range.
+ This will fill the temporary free list of the MMU context which
+ needs cleanup after the call.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Page size of the data pages
+
+@Return None
+*/
+/*****************************************************************************/
+static void _FreePageTables(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT32 ui32CurrentLevel = 0;
+ IMG_HANDLE hPriv;
+
+
+ PVR_DPF((PVR_DBG_ALLOC,
+ "_FreePageTables: vaddr range: 0x%010llx:0x%010llx",
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr
+ ));
+
+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+ uiLog2DataPageSize, auiStartArray, auiEndArray,
+ auiEntriesPerPx, apsConfig, aeMMULevel,
+ &psDevVAddrConfig, &hPriv);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+ _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+ auiStartArray, auiEndArray, auiEntriesPerPx,
+ apsConfig, aeMMULevel, &ui32CurrentLevel,
+ auiStartArray[0], auiEndArray[0],
+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+ _MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function _MMU_GetPTInfo
+
+@Description Get the PT level information and PT entry index for the specified
+ virtual address
+
+@Input psMMUContext MMU context to operate on
+
+@Input psDevVAddr Device virtual address to get the PTE info
+ from.
+
+@Input psDevVAddrConfig The current virtual address config obtained
+ beforehand by the caller (e.g. via _MMU_GetPTConfig).
+
+@Output psLevel Level info of the PT
+
+@Output pui32PTEIndex Index into the PT the address corresponds to
+
+@Return None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ MMU_Levelx_INFO **psLevel,
+ IMG_UINT32 *pui32PTEIndex)
+{
+ MMU_Levelx_INFO *psLocalLevel = NULL;
+
+ IMG_UINT32 uiPCEIndex;
+ IMG_UINT32 uiPDEIndex;
+
+ switch(psMMUContext->psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ /* find the page directory containing the PCE */
+ uiPCEIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+ psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
+
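+ /* Fall through */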
+ case MMU_LEVEL_2:
+ /* find the page table containing the PDE */
+ uiPDEIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+ if (psLocalLevel != NULL)
+ {
+ psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+ }
+ else
+ {
+ psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+ }
+
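+ /* Fall through */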
+ case MMU_LEVEL_1:
+ /* find PTE index into page table */
+ *pui32PTEIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+ if (psLocalLevel == NULL)
+ {
+ psLocalLevel = &psMMUContext->sBaseLevelInfo;
+ }
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level"));
+ return;
+ }
+
+ *psLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_GetPTConfig
+
+@Description Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiLog2DataPageSize Log 2 of the page size
+
+@Output ppsConfig Config of the PTE
+
+@Output phPriv Private data handle to be passed back
+ when the info is put
+
+@Output ppsDevVAddrConfig Config of the device virtual addresses
+
+@Return None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsConfig,
+ IMG_HANDLE *phPriv,
+ const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+
+ if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+ &psPDEConfig,
+ &psPTEConfig,
+ &psDevVAddrConfig,
+ phPriv) != PVRSRV_OK)
+ {
+ /*
+ There should be no way we got here unless uiLog2DataPageSize
+ has changed after the MMU_Alloc call (in which case it's a bug in
+ the MM code)
+ */
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+ PVR_ASSERT(0);
+ }
+
+ *ppsConfig = psPTEConfig;
+ *ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PutPTConfig
+
+@Description Put the level info. Has to be called after _MMU_GetPTConfig to
+ ensure correct refcounting.
+
+@Input psMMUContext MMU context to operate on
+
+@Input phPriv Private data handle created by
+ _MMU_GetPTConfig.
+
+@Return None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE hPriv)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+ if( psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not put page size config"));
+ PVR_ASSERT(0);
+ }
+
+}
+
+
+/*****************************************************************************
+ * Public interface functions *
+ *****************************************************************************/
+
+/*
+ MMU_ContextCreate
+*/
+PVRSRV_ERROR
+MMU_ContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+ MMU_CONTEXT **ppsMMUContext,
+ MMU_DEVICEATTRIBS *psDevAttrs)
+{
+ MMU_CONTEXT *psMMUContext;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psConfig;
+ MMU_PHYSMEM_CONTEXT *psCtx;
+ IMG_UINT32 ui32BaseObjects;
+ IMG_UINT32 ui32Size;
+ IMG_CHAR sBuf[40];
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ psConfig = psDevAttrs->psBaseConfig;
+ psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+ switch(psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC;
+ break;
+
+ case MMU_LEVEL_2:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD;
+ break;
+
+ case MMU_LEVEL_1:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Invalid MMU config"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /* Allocate the MMU context with the Level 1 Px info's */
+ ui32Size = sizeof(MMU_CONTEXT) +
+ ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+ psMMUContext = OSAllocZMem(ui32Size);
+ if (psMMUContext == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+#if defined(PDUMP)
+ /* Clear the refcount */
+ psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+ /* Record Device specific attributes in the context for subsequent use */
+ psMMUContext->psDevAttrs = psDevAttrs;
+ psMMUContext->psDevNode = psDevNode;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSid, ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+
+ RetrieveOSidsfromPidList(OSGetCurrentClientProcessIDKM(), &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ MMU_SetOSids(psMMUContext, ui32OSid, ui32OSidReg, bOSidAxiProt);
+}
+#endif
+
+ /*
+ Allocate physmem context and set it up
+ */
+ psCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT));
+ if (psCtx == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+ psMMUContext->psPhysMemCtx = psCtx;
+
+ psCtx->psDevNode = psDevNode;
+
+ OSSNPrintf(sBuf, sizeof(sBuf)-1, "pgtables %p", psCtx);
+ psCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+ psCtx->pszPhysMemRAName = OSAllocMem(psCtx->uiPhysMemRANameAllocSize);
+ if (psCtx->pszPhysMemRAName == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Out of memory"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+
+ OSStringCopy(psCtx->pszPhysMemRAName, sBuf);
+
+ psCtx->psPhysMemRA = RA_Create(psCtx->pszPhysMemRAName,
+ /* subsequent import */
+ psDevNode->uiMMUPxLog2AllocGran,
+ RA_LOCKCLASS_1,
+ _MMU_PhysMem_RAImportAlloc,
+ _MMU_PhysMem_RAImportFree,
+ psCtx, /* priv */
+ IMG_FALSE);
+ if (psCtx->psPhysMemRA == NULL)
+ {
+ OSFreeMem(psCtx->pszPhysMemRAName);
+ psCtx->pszPhysMemRAName = NULL;
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e3;
+ }
+
+ /* Setup cleanup meta data to check if a MMU context
+ * has been destroyed and should not be accessed anymore */
+ psCtx->psCleanupData = OSAllocMem(sizeof(*(psCtx->psCleanupData)));
+ if (psCtx->psCleanupData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e4;
+ }
+
+ OSLockCreate(&psCtx->psCleanupData->hCleanupLock, LOCK_TYPE_PASSIVE);
+ psCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
+ dllist_init(&psCtx->psCleanupData->sMMUCtxCleanupItemsHead);
+ psCtx->psCleanupData->uiRef = 1;
+
+ /* allocate the base level object */
+ /*
+ Note: Although this is not required by this file until
+ the 1st allocation is made, a device specific callback
+ might request the base object address so we allocate
+ it up front.
+ */
+ if (_PxMemAlloc(psMMUContext,
+ ui32BaseObjects,
+ psConfig,
+ psDevAttrs->eTopLevel,
+ &psMMUContext->sBaseLevelInfo.sMemDesc,
+ psDevAttrs->ui32BaseAlign))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to alloc level 1 object"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e5;
+ }
+
+ dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+
+ psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+ psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+ eError = OSLockCreate(&psMMUContext->hLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to create lock for MMU_CONTEXT"));
+ goto e6;
+ }
+
+ /* return context */
+ *ppsMMUContext = psMMUContext;
+
+ return PVRSRV_OK;
+
+e6:
+ _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel);
+e5:
+ OSFreeMem(psCtx->psCleanupData);
+e4:
+ RA_Delete(psCtx->psPhysMemRA);
+e3:
+ OSFreeMem(psCtx->pszPhysMemRAName);
+e2:
+ OSFreeMem(psCtx);
+e1:
+ OSFreeMem(psMMUContext);
+e0:
+ return eError;
+}
+
+/*
+ MMU_ContextDestroy
+*/
+void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PDLLIST_NODE psNode, psNextNode;
+
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psMMUContext->psDevNode;
+ MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData;
+ IMG_UINT32 uiRef;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Enter"));
+
+ if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+ {
+ /* There should be no way to get here with live pages unless
+ there is a bug in this module or the MM code */
+ PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ /* Free the top level MMU object - will be put on defer free list.
+ * This has to be done before the step below that will empty the
+ * defer-free list. */
+ _PxMemFree(psMMUContext,
+ &psMMUContext->sBaseLevelInfo.sMemDesc,
+ psMMUContext->psDevAttrs->eTopLevel);
+
+ /* Empty the temporary defer-free list of Px */
+ _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+ PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead));
+
+ OSLockAcquire(psCleanupData->hCleanupLock);
+
+ /* Empty the defer free list so the cleanup thread will
+ * not have to access any MMU context related structures anymore */
+ dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead,
+ psNode,
+ psNextNode)
+ {
+ MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode,
+ MMU_CLEANUP_ITEM,
+ sMMUCtxCleanupItem);
+
+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+ dllist_remove_node(psNode);
+ }
+ PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead));
+
+ psCleanupData->bMMUContextExists = IMG_FALSE;
+ uiRef = --psCleanupData->uiRef;
+
+ OSLockRelease(psCleanupData->hCleanupLock);
+
+ if (uiRef == 0)
+ {
+ OSLockDestroy(psCleanupData->hCleanupLock);
+ OSFreeMem(psCleanupData);
+ }
+
+ /* Free physmem context */
+ RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+ psMMUContext->psPhysMemCtx->psPhysMemRA = NULL;
+ OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+ psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL;
+
+ OSFreeMem(psMMUContext->psPhysMemCtx);
+
+ OSLockRelease(psMMUContext->hLock);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ RemovePidOSidCoupling(OSGetCurrentClientProcessIDKM());
+#endif
+
+ OSLockDestroy(psMMUContext->hLock);
+
+ /* free the context itself. */
+ OSFreeMem(psMMUContext);
+ /* not nulling pointer, copy on stack */
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Exit"));
+}
+
+/*
+ MMU_Alloc
+*/
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+ IMG_DEVMEM_SIZE_T uSize,
+ IMG_DEVMEM_SIZE_T *puActualSize,
+ IMG_UINT32 uiProtFlags,
+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 uiLog2PageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+ MMU_DEVICEATTRIBS *psDevAttrs;
+ IMG_HANDLE hPriv;
+
+#if !defined (DEBUG)
+ PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Alloc: uSize=0x%010llx, uiProtFlags=0x%x, align=0x%010llx", uSize, uiProtFlags, uDevVAddrAlignment));
+
+ /* check params */
+ if (!psMMUContext || !psDevVAddr || !puActualSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid params"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevAttrs = psMMUContext->psDevAttrs;
+
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+ &psPDEConfig,
+ &psPTEConfig,
+ &psDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to get config info (%d)", eError));
+ return eError;
+ }
+
+ /* size and alignment must be datapage granular */
+ if(((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+ || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid address or size granularity"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ sDevVAddrEnd = *psDevVAddr;
+ sDevVAddrEnd.uiAddr += uSize;
+
+ OSLockAcquire(psMMUContext->hLock);
+ eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+ OSLockRelease(psMMUContext->hLock);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
+ return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+ }
+
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_Free
+*/
+void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+ if (psMMUContext == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%010llX",
+ sDevVAddr.uiAddr));
+
+ /* ensure the address range to free is inside the heap */
+ sDevVAddrEnd = sDevVAddr;
+ sDevVAddrEnd.uiAddr += uiSize;
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _FreePageTables(psMMUContext,
+ sDevVAddr,
+ sDevVAddrEnd,
+ uiLog2DataPageSize);
+
+ _SetupCleanup_FreeMMUMapping(psMMUContext->psDevNode,
+ psMMUContext->psPhysMemCtx);
+
+ OSLockRelease(psMMUContext->hLock);
+
+ return;
+
+}
+
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ PMR *psPMR,
+ IMG_UINT32 ui32PhysPgOffset,
+ IMG_UINT32 ui32MapPageCount,
+ IMG_UINT32 *paui32MapIndices,
+ IMG_UINT32 uiLog2PageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hPriv;
+
+ MMU_Levelx_INFO *psLevel = NULL;
+
+ MMU_Levelx_INFO *psPrevLevel = NULL;
+
+ IMG_UINT32 uiPTEIndex = 0;
+ IMG_UINT32 uiPageSize = (1 << uiLog2PageSize);
+ IMG_UINT32 uiLoop = 0;
+ IMG_UINT32 ui32MappedCount = 0;
+ IMG_UINT32 uiPgOffset = 0;
+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_BOOL *pbValid;
+ IMG_BOOL bValid;
+ IMG_BOOL bDummyBacking = IMG_FALSE;
+ IMG_BOOL bNeedBacking = IMG_FALSE;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+ PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%lld bytes)",
+ (IMG_UINT64)(ui32MapPageCount * uiPageSize));
+#endif /*PDUMP*/
+
+
+ /* Validate the most essential parameters */
+ if((NULL == psMMUContext) || (0 == sDevVAddrBase.uiAddr) || (NULL == psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Invalid mapping parameter issued", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /* Allocate memory for the physical page addresses and validity states;
+ fall back to the heap when the request exceeds the stack arrays */
+ if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
+ if (pbValid == NULL)
+ {
+ /* Should allocation fail, clean-up here before exit */
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psDevPAddr);
+ goto e0;
+ }
+ }
+ else
+ {
+ psDevPAddr = asDevPAddr;
+ pbValid = abValid;
+ }
+
+ /* Get the device physical addresses of the pages we are trying to map.
+ * In the case of a non-indexed mapping we can get all addresses at once */
+ if(NULL == paui32MapIndices)
+ {
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2PageSize,
+ ui32MapPageCount,
+ (ui32PhysPgOffset << uiLog2PageSize),
+ psDevPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+
+ /*Get the Page table level configuration */
+ _MMU_GetPTConfig(psMMUContext,
+ (IMG_UINT32) uiLog2PageSize,
+ &psConfig,
+ &hPriv,
+ &psDevVAddrConfig);
+
+ _MMU_ConvertDevMemFlags(IMG_FALSE,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext);
+ /* Callback to get device specific protection flags */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e2;
+ }
+
+ if (PMR_IsSparse(psPMR))
+ {
+ /* Sparse PMRs may need their unbacked regions pointed at the dummy page */
+ bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags);
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
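+ /* Map the requested pages one by one: for each page, look up the page
+ table covering the current device virtual address and write the PTE.
+ PT cache maintenance is batched per page table via uiFlushStart and
+ uiFlushEnd and issued whenever the walk moves on to a different table. */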
+ for(uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
+ {
+
+#if defined(PDUMP)
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+ if(NULL != paui32MapIndices)
+ {
+ uiPgOffset = paui32MapIndices[uiLoop];
+
+ /*Calculate the Device Virtual Address of the page */
+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
+ /* Get the physical address to map */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2PageSize,
+ 1,
+ uiPgOffset * uiPageSize,
+ &sDevPAddr,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+ }
+ else
+ {
+ uiPgOffset = uiLoop + ui32PhysPgOffset;
+ sDevPAddr = psDevPAddr[uiLoop];
+ bValid = pbValid[uiLoop];
+ }
+
+ /*
+ The default value of the entry is invalid, so we don't need to mark
+ it as such if the page wasn't valid; we just advance past that address
+ */
+ if (bValid || bDummyBacking)
+ {
+
+ if(!bValid)
+ {
+ sDevPAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+ }
+ else
+ {
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
+ }
+
+#if defined(DEBUG)
+{
+ IMG_INT32 i32FeatureVal = 0;
+ IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
+
+ i32FeatureVal = psMMUContext->psDevNode->pfnGetDeviceFeatureValue(psMMUContext->psDevNode, \
+ RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK);
+ do {
+ /* i32FeatureVal can be negative where this feature is undefined.
+ * In that situation we bail out rather than go ahead with the debug comparison */
+ if(0 > i32FeatureVal)
+ break;
+
+ if (ui32BitLength > i32FeatureVal )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+ "is greater than what the chip can handle (%d).",
+ ui32BitLength, i32FeatureVal));
+
+ PVR_ASSERT(ui32BitLength <= i32FeatureVal );
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e3;
+ }
+ }while(0);
+}
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+ if(bValid)
+ {
+ eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+#endif /*PDUMP*/
+
+ psPrevLevel = psLevel;
+ /* Calculate PT index and get new table descriptor */
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+
+ if (psPrevLevel == psLevel)
+ {
+ uiFlushEnd = uiPTEIndex;
+ }
+ else
+ {
+ /* Flush if we moved to another psLevel, i.e. page table */
+ if (psPrevLevel != NULL)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e3;
+ }
+
+ uiFlushStart = uiPTEIndex;
+ uiFlushEnd = uiFlushStart;
+ }
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr));
+
+ /* Set the PT entry with the specified address and protection flags */
+ eError = _SetupPTE(psMMUContext,
+ psLevel,
+ uiPTEIndex,
+ psConfig,
+ &sDevPAddr,
+ IMG_FALSE,
+#if defined(PDUMP)
+ (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName),
+ (bValid)?aszSymbolicAddress:DUMMY_PAGE,
+ (bValid)?uiSymbolicAddrOffset:0,
+#endif /*PDUMP*/
+ uiProtFlags);
+
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping failed", __func__));
+ goto e3;
+ }
+
+ if(bValid)
+ {
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "%s: devVAddr=%10llX, size=0x%x",
+ __func__,
+ sDevVAddr.uiAddr,
+ uiPgOffset * uiPageSize));
+
+ ui32MappedCount++;
+ }
+ }
+
+ sDevVAddr.uiAddr += uiPageSize;
+ }
+
+ /* Flush the last level we touched */
+ if (psLevel != NULL)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e3;
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_FALSE);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+
+e3:
+ OSLockRelease(psMMUContext->hLock);
+
+ if(PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
+ {
+ bNeedBacking = IMG_TRUE;
+ }
+
+ MMU_UnmapPages(psMMUContext,(bNeedBacking)?uiMappingFlags:0, sDevVAddrBase, uiLoop, paui32MapIndices, uiLog2PageSize, bNeedBacking);
+e2:
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+e1:
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+e0:
+ return eError;
+}
+
+/*
+ MMU_UnmapPages
+*/
+void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bDummyBacking)
+{
+ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+ MMU_Levelx_INFO *psLevel = NULL;
+ MMU_Levelx_INFO *psPrevLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_DEV_PHYADDR sDummyPgDevPhysAddr;
+ IMG_BOOL bUnmap = IMG_TRUE;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010llX to 0x%010llX",
+ ui32PageCount,
+ (IMG_UINT64)sDevVAddr.uiAddr,
+ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
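+ /* When dummy backing is requested the PTEs are re-pointed at the device's
+ dummy page rather than being invalidated, so the virtual range stays
+ safely backed; otherwise the entries are invalidated and poisoned with
+ the bad physical address. */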
+ sDummyPgDevPhysAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+ bUnmap = (bDummyBacking)?IMG_FALSE:IMG_TRUE;
+ /* Get PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ _MMU_ConvertDevMemFlags(bUnmap,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext);
+
+ /* Callback to get device specific protection flags */
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+ }
+
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ /* Unmap page by page */
+ while (ui32Loop < ui32PageCount)
+ {
+ if(NULL != pai32FreeIndices)
+ {
+ /*Calculate the Device Virtual Address of the page */
+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr +
+ pai32FreeIndices[ui32Loop] * uiPageSize;
+ }
+
+ psPrevLevel = psLevel;
+ /* Calculate PT index and get new table descriptor */
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+
+ if (psPrevLevel == psLevel)
+ {
+ uiFlushEnd = uiPTEIndex;
+ }
+ else
+ {
+ /* Flush if we moved to another psLevel, i.e. page table */
+ if (psPrevLevel != NULL)
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ }
+
+ uiFlushStart = uiPTEIndex;
+ uiFlushEnd = uiFlushStart;
+ }
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+ /* Set the PT entry to invalid and poison it with a bad address */
+ if (_SetupPTE(psMMUContext,
+ psLevel,
+ uiPTEIndex,
+ psConfig,
+ (bDummyBacking)?&sDummyPgDevPhysAddr:&gsBadDevPhyAddr,
+ bUnmap,
+#if defined(PDUMP)
+ (bDummyBacking)?(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName):NULL,
+ (bDummyBacking)?DUMMY_PAGE:NULL,
+ 0U,
+#endif
+ uiProtFlags) != PVRSRV_OK )
+ {
+ goto e0;
+ }
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ ui32Loop++;
+ sDevVAddr.uiAddr += uiPageSize;
+ }
+
+ /* Flush the last level we touched */
+ if (psLevel != NULL)
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_TRUE);
+
+ return;
+
+e0:
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+ PVR_ASSERT(0);
+ OSLockRelease(psMMUContext->hLock);
+ return;
+}
+
+PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSizeBytes,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_UINT32 uiLog2PageSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiCount, i;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ IMG_UINT32 uiPTEIndex = 0;
+ IMG_UINT64 uiProtFlags;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr;
+ IMG_BOOL *pbValid;
+ IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+ IMG_UINT32 ui32MappedCount = 0;
+ PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%lld bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+ /* We should verify the size and contiguity when supporting variable page size */
+
+ PVR_ASSERT (psMMUContext != NULL);
+ PVR_ASSERT (psPMR != NULL);
+
+
+ /* Allocate memory for page-frame-numbers and validity states,
+ N.B. assert could be triggered by an illegal uiSizeBytes */
+ uiCount = uiSizeBytes >> uiLog2PageSize;
+ PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2PageSize == uiSizeBytes);
+ if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+ if (pbValid == NULL)
+ {
+ /* Should allocation fail, clean-up here before exit */
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psDevPAddr);
+ goto e0;
+ }
+ }
+ else
+ {
+ psDevPAddr = asDevPAddr;
+ pbValid = abValid;
+ }
+
+ /* Get general PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ _MMU_ConvertDevMemFlags(IMG_FALSE,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext);
+
+ /* Callback to get device specific protection flags */
+
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ goto e1;
+ }
+
+
+ /* "uiSize" is the amount of contiguity in the underlying
+ page. Normally this would be constant for the system, but,
+ that constant needs to be communicated, in case it's ever
+ different; caller guarantees that PMRLockSysPhysAddr() has
+ already been called */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2PageSize,
+ uiCount,
+ 0,
+ psDevPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+
+ /* Map in all pages of that PMR page by page*/
+ for (i=0, uiCount=0; uiCount < uiSizeBytes; i++)
+ {
+#if defined(DEBUG)
+{
+ IMG_INT32 i32FeatureVal = 0;
+ IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+ i32FeatureVal = psMMUContext->psDevNode->pfnGetDeviceFeatureValue(psMMUContext->psDevNode, \
+ RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK);
+ do {
+ if(0 > i32FeatureVal)
+ break;
+
+ if (ui32BitLength > i32FeatureVal )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+ "is greater than what the chip can handle (%d).",
+ ui32BitLength, i32FeatureVal));
+
+ PVR_ASSERT(ui32BitLength <= i32FeatureVal );
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ OSLockRelease(psMMUContext->hLock);
+ goto e1;
+ }
+ }while(0);
+}
+#endif /*DEBUG*/
+#if defined(PDUMP)
+ {
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ui32MappedCount++;
+ }
+#endif /*PDUMP*/
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+ HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr));
+
+ /* Set the PT entry with the specified address and protection flags */
+ eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+ psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+ aszMemspaceName,
+ aszSymbolicAddress,
+ uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+ uiProtFlags);
+ if (eError != PVRSRV_OK)
+ goto e2;
+
+ sDevVAddr.uiAddr += uiPageSize;
+ uiCount += uiPageSize;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes))
+ {
+ uiPTEIndex++;
+ }
+ else
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e2;
+
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+ }
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_FALSE);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+
+e2:
+ OSLockRelease(psMMUContext->hLock);
+ MMU_UnmapPMRFast(psMMUContext,
+ sDevVAddrBase,
+ uiSizeBytes >> uiLog2PageSize,
+ uiLog2PageSize);
+e1:
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+e0:
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+/*
+ MMU_UnmapPMRFast
+*/
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 uiLog2PageSize)
+{
+ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ IMG_UINT64 uiEntry = 0;
+ IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010llX to 0x%010llX",
+ ui32PageCount,
+ (IMG_UINT64)sDevVAddr.uiAddr,
+ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+ /* Get PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ _MMU_ConvertDevMemFlags(IMG_TRUE,
+ 0,
+ &uiMMUProtFlags,
+ psMMUContext);
+
+ /* Callback to get device specific protection flags */
+
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+
+ /* Fill the entry with a bad address but leave space for protection flags */
+ uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+
+ /* Fill the entry with a bad address but leave space for protection flags */
+ uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ goto e0;
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+
+ /* Unmap page by page, keeping the loop as quick as possible:
+ * only the parts of _SetupPTE that actually need to run are open-coded here. */
+ while (ui32Loop < ui32PageCount)
+ {
+
+ /* Set the PT entry to invalid and poison it with a bad address */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ goto e1;
+ }
+
+ /* Log modifications */
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiPTEIndex, MMU_LEVEL_1,
+ HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
+ IMG_FALSE);
+
+#if defined (PDUMP)
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psLevel->sMemDesc.pvCpuVAddr,
+ psLevel->sMemDesc.sDevPAddr,
+ uiPTEIndex,
+ 1,
+ NULL,
+ NULL,
+ 0,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ sDevVAddr.uiAddr += uiPageSize;
+ ui32Loop++;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount))
+ {
+ uiPTEIndex++;
+ }
+ else
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+ }
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_TRUE);
+
+ return;
+
+e1:
+ OSLockRelease(psMMUContext->hLock);
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+e0:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+ PVR_ASSERT(0);
+ return;
+}
+
+/*
+ MMU_ChangeValidity
+*/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bMakeValid,
+ PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ IMG_HANDLE hPriv;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psConfig;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_UINT32 uiFlushStart = 0;
+ IMG_UINT32 uiPTIndex = 0;
+ IMG_UINT32 i;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ IMG_BOOL bValid;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%llX - 0x%llX)",
+ bMakeValid,
+ sDevVAddr.uiAddr,
+ sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1 );
+#endif /*PDUMP*/
+
+ /* We should verify the size and contiguity when supporting variable page size */
+ PVR_ASSERT (psMMUContext != NULL);
+ PVR_ASSERT (psPMR != NULL);
+
+ /* Get general PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTIndex);
+ uiFlushStart = uiPTIndex;
+
+ /* Do a page table walk and change attribute for every page in range. */
+ for (i=0; i < uiNumPages; )
+ {
+
+ /* Set the entry */
+ if (bMakeValid == IMG_TRUE)
+ {
+ /* Only set valid if physical address exists (sparse allocs might have none)*/
+ eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, i<<uiLog2PageSize, &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot determine validity of page table entries page"));
+ goto e_exit;
+ }
+
+ if (bValid)
+ {
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+ goto e_exit;
+ }
+ }
+ }
+ else
+ {
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+ goto e_exit;
+ }
+ }
+
+#if defined(PDUMP)
+ PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psLevel->sMemDesc.pvCpuVAddr,
+ psLevel->sMemDesc.sDevPAddr,
+ uiPTIndex,
+ 1,
+ aszMemspaceName,
+ aszSymbolicAddress,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ sDevVAddr.uiAddr += uiPageSize;
+ i++;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
+ {
+ uiPTIndex++;
+ }
+ else
+ {
+
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e_exit;
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTIndex);
+ uiFlushStart = uiPTIndex;
+ }
+ }
+
+e_exit:
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ !bMakeValid);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ MMU_AcquireBaseAddr
+*/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+ if (!psMMUContext)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_ReleaseBaseAddr
+*/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+ PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+ MMU_SetDeviceData
+*/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData)
+{
+ psMMUContext->hDevData = hDevData;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ MMU_SetOSids, MMU_GetOSids
+*/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+ psMMUContext->ui32OSid = ui32OSid;
+ psMMUContext->ui32OSidReg = ui32OSidReg;
+ psMMUContext->bOSidAxiProt = bOSidAxiProt;
+
+ return ;
+}
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+ *pui32OSid = psMMUContext->ui32OSid;
+ *pui32OSidReg = psMMUContext->ui32OSidReg;
+ *pbOSidAxiProt = psMMUContext->bOSidAxiProt;
+
+ return ;
+}
+
+#endif
+
+/*
+ MMU_CheckFaultAddress
+*/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_PxE_CONFIG *psMMUPDEConfig;
+ const MMU_PxE_CONFIG *psMMUPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ MMU_Levelx_INFO *psLevel = NULL;
+ PVRSRV_ERROR eError;
+ IMG_UINT64 uiIndex;
+ IMG_UINT32 ui32PCIndex;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 ui32Log2PageSize;
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ /*
+ At this point we don't know the page size so assume it's 4K.
+ When we get the PD level (MMU_LEVEL_2) we can check to see
+ if this assumption is correct.
+ */
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ &psMMUDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size info for log2 page sizeof 12"));
+ }
+
+ psLevel = &psMMUContext->sBaseLevelInfo;
+ psConfig = psDevAttrs->psBaseConfig;
+
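+ /* Walk the hierarchy from the top level downwards, dumping the entry at
+ each level; each case deliberately falls through to the next level
+ whenever a lower-level table is present. */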
+ switch(psMMUContext->psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ /* Determine the PC index */
+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+ ui32PCIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+
+ if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PC index (%d) out of bounds (%d)", ui32PCIndex, psLevel->ui32NumOfEntries);
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%08x and %s be valid",
+ ui32PCIndex,
+ pui32Ptr[ui32PCIndex],
+ psLevel->apsNextLevel[ui32PCIndex]?"should":"should not");
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%016llx and %s be valid",
+ ui32PCIndex,
+ pui64Ptr[ui32PCIndex],
+ psLevel->apsNextLevel[ui32PCIndex]?"should":"should not");
+ }
+
+ psLevel = psLevel->apsNextLevel[ui32PCIndex];
+ if (!psLevel)
+ {
+ break;
+ }
+ psConfig = psMMUPDEConfig;
+ /* Fall through */
+
+ case MMU_LEVEL_2:
+ /* Determine the PD index */
+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+ ui32PDIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+ if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PD index (%d) out of bounds (%d)", ui32PDIndex, psLevel->ui32NumOfEntries);
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%08x and %s be valid",
+ ui32PDIndex,
+ pui32Ptr[ui32PDIndex],
+ psLevel->apsNextLevel[ui32PDIndex]?"should":"should not");
+
+ if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size from the PDE"));
+ }
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%016llx and %s be valid",
+ ui32PDIndex,
+ pui64Ptr[ui32PDIndex],
+ psLevel->apsNextLevel[ui32PDIndex]?"should":"should not");
+
+ if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size from the PDE"));
+ }
+ }
+
+ /*
+ We assumed the page size was 4K, now we have the actual size
+ from the PDE we can confirm if our assumption was correct.
+ Until now it hasn't mattered as the PC and PD are the same
+ regardless of the page size
+ */
+ if (ui32Log2PageSize != 12)
+ {
+ /* Put the 4K page size data */
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+ /* Get the correct size data */
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ &psMMUDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize));
+ break;
+ }
+ }
+ psLevel = psLevel->apsNextLevel[ui32PDIndex];
+ if (!psLevel)
+ {
+ break;
+ }
+ psConfig = psMMUPTEConfig;
+ /* Fall through */
+
+ case MMU_LEVEL_1:
+ /* Determine the PT index */
+ uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+ uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+ ui32PTIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+ if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PT index (%d) out of bounds (%d)", ui32PTIndex, psLevel->ui32NumOfEntries);
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%08x",
+ ui32PTIndex,
+ pui32Ptr[ui32PTIndex]);
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%016llx",
+ ui32PTIndex,
+ pui64Ptr[ui32PTIndex]);
+ }
+
+ break;
+ default:
+ PVR_LOG(("Unsupported MMU setup"));
+ break;
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ MMU_Levelx_INFO *psLevel = NULL;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ IMG_UINT32 uiIndex = 0;
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ switch(psMMUContext->psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+ if (psLevel == NULL)
+ break;
+ /* fall through */
+ case MMU_LEVEL_2:
+ uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+ if (psLevel != NULL)
+ psLevel = psLevel->apsNextLevel[uiIndex];
+ else
+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+ if (psLevel == NULL)
+ break;
+ /* fall through */
+ case MMU_LEVEL_1:
+ uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+ if (psLevel == NULL)
+ psLevel = &psMMUContext->sBaseLevelInfo;
+
+ bStatus = ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiIndex]
+ & psConfig->uiValidEnMask;
+ break;
+ default:
+ PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
+ break;
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ return bStatus;
+}
+
+#if defined(PDUMP)
+/*
+ MMU_ContextDerivePCPDumpSymAddr
+*/
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+ IMG_CHAR *pszPDumpSymbolicNameBuffer,
+ size_t uiPDumpSymbolicNameBufferSize)
+{
+ size_t uiCount;
+ IMG_UINT64 ui64PhysAddr;
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+ {
+ /* We don't have any allocations. You're not allowed to ask
+ for the page catalogue base address until you've made at
+ least one allocation */
+ return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+ }
+
+ ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+ PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+ /* The page catalogue symbolic name is formed from the page catalogue
+ physical address prefixed with MMUPC_. */
+
+ uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+ uiPDumpSymbolicNameBufferSize,
+ ":%s:%s%016llX",
+ psDevId->pszPDumpDevName,
+ psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
+ ui64PhysAddr);
+
+ if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_PDumpWritePageCatBase
+*/
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+ const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+
+ eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+ &aszPageCatBaseSymbolicAddr[0],
+ sizeof(aszPageCatBaseSymbolicAddr));
+ if (eError == PVRSRV_OK)
+ {
+ eError = PDumpWriteSymbAddress(pszSpaceName,
+ uiOffset,
+ aszPageCatBaseSymbolicAddr,
+ 0, /* offset -- Could be non-zero for var. pgsz */
+ pszPDumpDevName,
+ ui32WordSize,
+ ui32AlignShift,
+ ui32Shift,
+ uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ return eError;
+}
+
+/*
+ MMU_AcquirePDumpMMUContext
+*/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 *pui32PDumpMMUContextID)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ if (!psMMUContext->ui32PDumpContextIDRefCount)
+ {
+ PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName,
+ psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+ psMMUContext->psDevAttrs->eMMUType,
+ &psMMUContext->uiPDumpContextID);
+ }
+
+ psMMUContext->ui32PDumpContextIDRefCount++;
+ *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_ReleasePDumpMMUContext
+*/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+ psMMUContext->ui32PDumpContextIDRefCount--;
+
+ if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+ {
+ PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName,
+ psMMUContext->uiPDumpContextID);
+ }
+
+ return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+******************************************************************************/
+
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Common MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+ The Memory Management Unit (MMU) performs device virtual to physical translation.
+
+ Terminology:
+ - page catalogue, PC (optional, 3 tier MMU)
+ - page directory, PD
+ - page table, PT (can be variable sized)
+ - data page, DP (can be variable sized)
+ Note: PD and PC are fixed size and can't be larger than
+ the native physical (CPU) page size
+ Shifts and AlignShift variables:
+ - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0
+ - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units
+ by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+ Device Virtual Address Config:
+
+ Incoming Device Virtual Address is deconstructed into up to 4
+ fields, where the virtual address is up to 64bits:
+ MSB-----------------------------------------------LSB
+ | PC Index: | PD Index: | PT Index: | DP offset: |
+ | d bits | c bits | b-v bits | a+v bits |
+ -----------------------------------------------------
+ where v is the variable page table modifier, e.g.
+ v == 0 -> 4KB DP
+ v == 2 -> 16KB DP
+ v == 4 -> 64KB DP
+ v == 6 -> 256KB DP
+ v == 8 -> 1MB DP
+ v == 10 -> 4MB DP
+*/
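+
+/*
+ Illustrative example only (field widths are hypothetical and differ between
+ cores): with a 4KB data page (v == 0) a device virtual address could be
+ split using the masks and shifts held in MMU_DEVVADDR_CONFIG roughly as
+
+ ui32PCIndex = (ui64DevVAddr & uiPCIndexMask) >> uiPCIndexShift;
+ ui32PDIndex = (ui64DevVAddr & uiPDIndexMask) >> uiPDIndexShift;
+ ui32PTIndex = (ui64DevVAddr & uiPTIndexMask) >> uiPTIndexShift;
+ uiPageOffset = ui64DevVAddr & uiPageOffsetMask;
+
+ which mirrors what MMU_CheckFaultAddress does when it decodes a faulting
+ address.
+*/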
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+
+/*!
+ The level of the MMU
+*/
+typedef enum
+{
+ MMU_LEVEL_0 = 0, /* Level 0 = Page */
+
+ MMU_LEVEL_1,
+ MMU_LEVEL_2,
+ MMU_LEVEL_3,
+ MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+ MMU device attributes. This structure is the interface between the generic
+ MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+ PDUMP_MMU_TYPE eMMUType;
+
+ IMG_CHAR *pszMMUPxPDumpMemSpaceName;
+
+ /*! The type of the top level object */
+ MMU_LEVEL eTopLevel;
+
+ /*! Alignment requirement of the base object */
+ IMG_UINT32 ui32BaseAlign;
+
+ /*! HW config of the base object */
+ struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+ /*! Address split for the base object */
+ const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+ /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32);
+ /*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32);
+ /*! Callback for creating protection bits for the page table entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page table entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32);
+
+ /*! Callback for getting the MMU configuration based on the specified page size */
+ PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+ const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+ const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+ const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv2);
+ /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+ PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+ /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+ /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+
+ /*! Private data handle */
+ IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+ MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+ /*! Page catalogue index mask */
+ IMG_UINT64 uiPCIndexMask;
+ /*! Page catalogue index shift */
+ IMG_UINT8 uiPCIndexShift;
+ /*! Total number of PC entries */
+ IMG_UINT32 uiNumEntriesPC;
+ /*! Page directory index mask */
+ IMG_UINT64 uiPDIndexMask;
+ /*! Page directory index shift */
+ IMG_UINT8 uiPDIndexShift;
+ /*! Total number of PD entries */
+ IMG_UINT32 uiNumEntriesPD;
+ /*! Page table index mask */
+ IMG_UINT64 uiPTIndexMask;
+ /*! Page table index shift */
+ IMG_UINT8 uiPTIndexShift;
+ /*! Total number of PT entries */
+ IMG_UINT32 uiNumEntriesPT;
+ /*! Page offset mask */
+ IMG_UINT64 uiPageOffsetMask;
+ /*! Page offset shift */
+ IMG_UINT8 uiPageOffsetShift;
+ /*! First virtual address mappable for this config */
+ IMG_UINT64 uiOffsetInBytes;
+
+} MMU_DEVVADDR_CONFIG;
+
+/*
+ P(C/D/T) Entry Config:
+
+ MSB-----------------------------------------------LSB
+ | PT Addr: | variable PT ctrl | protection flags: |
+ | bits c+v | b bits | a bits |
+ -----------------------------------------------------
+ where v is the variable page table modifier and is optional
+*/
+/*!
+ Generic MMU entry description. This is used to describe PC, PD and PT entries.
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+ IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */
+
+ IMG_UINT64 uiAddrMask; /*! Physical address mask */
+ IMG_UINT8 uiAddrShift; /*! Physical address shift */
+ IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */
+
+ IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */
+ IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */
+
+ IMG_UINT64 uiProtMask; /*! Protection flags mask */
+ IMG_UINT8 uiProtShift; /*! Protection flags shift */
+
+ IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */
+ IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */
+} MMU_PxE_CONFIG;
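+
+/*
+ Illustrative sketch only (not the actual _SetupPTE implementation): given a
+ MMU_PxE_CONFIG, a physical address and derived protection bits, an entry
+ would typically be assembled and decoded along the lines of
+
+ uiEntry = (((sDevPAddr.uiAddr >> uiAddrLog2Align) << uiAddrShift)
+ & uiAddrMask)
+ | (uiProtFlags & uiProtMask);
+ sDevPAddr.uiAddr = ((uiEntry & uiAddrMask) >> uiAddrShift) << uiAddrLog2Align;
+
+ with uiValidEnMask selecting the valid bit, as MMU_ChangeValidity does when
+ it sets or clears entry validity.
+*/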
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+ are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+ traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE (1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2)
+#define MMU_PROTFLAGS_CACHED (1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET 16
+#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n) \
+ (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+ MMU_PROTFLAGS_DEVICE_MASK)
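+
+/*
+ Example (the device-specific value 0x3 is hypothetical): a readable,
+ writeable, cached mapping with device-specific bits would be requested as
+
+ MMU_PROTFLAGS_T uiProt = MMU_PROTFLAGS_READABLE |
+ MMU_PROTFLAGS_WRITEABLE |
+ MMU_PROTFLAGS_CACHED |
+ MMU_PROTFLAGS_DEVICE(0x3);
+
+ which the pfnDerivePxEProt callbacks then translate into hardware PxE bits.
+*/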
+
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef struct _MMU_PAGESIZECONFIG_
+{
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT32 uiRefCount;
+ IMG_UINT32 uiMaxRefCount;
+} MMU_PAGESIZECONFIG;
+
+/*************************************************************************/ /*!
+@Function MMU_ContextCreate
+
+@Description Create a new MMU context
+
+@Input psDevNode Device node of the device to create the
+ MMU context for
+
+@Output ppsMMUContext The created MMU context
+
+@Return PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_ContextCreate (struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ MMU_CONTEXT **ppsMMUContext,
+ MMU_DEVICEATTRIBS *psDevAttrs);
+
+
+/*************************************************************************/ /*!
+@Function MMU_ContextDestroy
+
+@Description Destroy a MMU context
+
+@Input ppsMMUContext MMU context to destroy
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function MMU_Alloc
+
+@Description Allocate the page tables required for the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input uSize The size of the allocation
+
+@Output puActualSize Actual size of allocation
+
+@Input uiProtFlags Generic MMU protection flags
+
+@Input uDevVAddrAlignment Alignment requirement of the virtual
+ allocation
+
+@Input psDevVAddr Virtual address to start the allocation
+ from
+
+@Return PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+ IMG_DEVMEM_SIZE_T uSize,
+ IMG_DEVMEM_SIZE_T *puActualSize,
+ IMG_UINT32 uiProtFlags,
+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function MMU_Free
+
+@Description Free the page tables of the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input psDevVAddr Virtual address to start the free
+ from
+
+@Input uSize The size of the allocation
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiLog2DataPageSize);
+
+
+/*************************************************************************/ /*!
+@Function MMU_MapPages
+
+@Description Map pages to the MMU.
+ Two modes of operation: One requires a list of physical page
+ indices that are going to be mapped, the other just takes
+ the PMR and a possible offset to map parts of it.
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Input sDevVAddrBase Device virtual address of the 1st page
+
+@Input psPMR PMR to map
+
+@Input ui32PhysPgOffset Physical offset into the PMR
+
+@Input ui32MapPageCount Number of pages to map
+
+@Input paui32MapIndices List of page indices to map,
+ can be NULL
+
+@Input uiLog2PageSize Log2 page size of the pages to map
+
+@Return PVRSRV_OK if the mapping was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ PMR *psPMR,
+ IMG_UINT32 ui32PhysPgOffset,
+ IMG_UINT32 ui32MapPageCount,
+ IMG_UINT32 *paui32MapIndices,
+ IMG_UINT32 uiLog2PageSize);
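+
+/*
+ Usage sketch (illustrative only; the variables below are hypothetical and
+ the PMR is assumed to be ready for physical address translation):
+
+ Map 16 contiguous 4KB pages starting at physical page 0 of the PMR:
+ eError = MMU_MapPages(psMMUContext, uiFlags, sDevVAddrBase,
+ psPMR, 0, 16, NULL, 12);
+
+ Map only selected pages of a sparse PMR (ui32MapPageCount is then the
+ number of entries in the index list):
+ IMG_UINT32 aui32Indices[] = { 0, 3, 7 };
+ eError = MMU_MapPages(psMMUContext, uiFlags, sDevVAddrBase,
+ psPMR, 0, 3, aui32Indices, 12);
+*/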
+
+/*************************************************************************/ /*!
+@Function MMU_UnmapPages
+
+@Description Unmap pages from the MMU.
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Input psDevVAddr Device virtual address of the 1st page
+
+@Input ui32PageCount Number of pages to unmap
+
+@Input pai32UnmapIndicies Array of page indices to be unmapped
+
+@Input uiLog2PageSize log2 size of the page
+
+
+@Input bDummyBacking Bool that indicates if the unmapped
+ regions need to be backed by dummy
+ page
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 *pai32UnmapIndicies,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bDummyBacking);
+
+/*************************************************************************/ /*!
+@Function MMU_MapPMRFast
+
+@Description Map a PMR into the MMU. The PMR must not be sparse.
+ This is supposed to cover most mappings and, as the name suggests,
+ should be as fast as possible.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddr Device virtual address to map the PMR
+ into
+
+@Input psPMR PMR to map
+
+@Input uiSizeBytes Size in bytes to map
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Return PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSizeBytes,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function MMU_UnmapPMRFast
+
+@Description Unmap pages from the MMU as fast as possible.
+ The PMR must not be sparse!
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrBase Device virtual address of the 1st page
+
+@Input ui32PageCount Number of pages to unmap
+
+@Input uiLog2PageSize log2 size of the page
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function MMU_ChangeValidity
+
+@Description Sets or unsets the valid bit of page table entries for a given
+ address range.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddr The device virtual base address of
+ the range we want to modify
+
+@Input uiSizeBytes The size of the range in bytes
+
+@Input uiLog2PageSize Log2 of the used page size
+
+@Input bMakeValid Choose to set or unset the valid bit.
+ (bMakeValid == IMG_TRUE ) -> SET
+ (bMakeValid == IMG_FALSE) -> UNSET
+
+@Input psPMR The PMR backing the allocation.
+ Needed in case we have sparse memory
+ where we have to check whether a physical
+ address actually backs the virtual.
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSizeBytes,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bMakeValid,
+ PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function MMU_AcquireBaseAddr
+
+@Description Acquire the device physical address of the base level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Output psPhysAddr Device physical address of the base level
+ MMU object
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function MMU_ReleaseBaseAddr
+
+@Description Release the device physical address of the base level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function MMU_SetOSids
+
+@Description Set the OSid associated with the application (and the MMU Context)
+
+@Input psMMUContext MMU context to store the OSid on
+
+@Input ui32OSid the OSid in question
+
+@Input ui32OSidReg The value that the firmware will assign to the
+ registers.
+
+@Input bOSidAxiProt Whether the AXI prot bit should be set or
+ not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+
+/***********************************************************************************/ /*!
+@Function MMU_GetOSids
+
+@Description Retrieve the OSid associated with the MMU context.
+
+@Input psMMUContext MMU context in which the OSid is stored
+
+@Output pui32OSid The OSid in question
+
+@Output pui32OSidReg The OSid that the firmware will assign to the
+ registers.
+
+@Output pbOSidAxiProt Whether the AXI prot bit will be set or
+ not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+#endif
+
+/*************************************************************************/ /*!
+@Function MMU_SetDeviceData
+
+@Description Set the device specific callback data
+
+@Input psMMUContext MMU context to store the data on
+
+@Input hDevData Device data
+
+@Return None
+*/
+/*****************************************************************************/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData);
+
+/*************************************************************************/ /*!
+@Function MMU_CheckFaultAddress
+
+@Description Check the specified MMU context to see if the provided address
+ should be valid
+
+@Input psMMUContext MMU context to operate on
+
+@Input psDevVAddr Address to check
+
+@Input pfnDumpDebugPrintf Debug print function
+
+@Input pvDumpDebugFile Optional file identifier to be passed
+ to the debug print function if required
+
+@Return None
+*/
+/*****************************************************************************/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function MMU_IsVDevAddrValid
+@Description Checks if the given address is valid.
+@Input psMMUContext MMU context to operate on
+@Input uiLog2PageSize Log2 of the page size
+@Input sDevVAddr Address to check
+@Return IMG_TRUE if the address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_DEV_VIRTADDR sDevVAddr);
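+
+/* Usage sketch (illustrative only): on a page fault the two calls above can
+ * be combined to report whether the faulting address was expected to be
+ * mapped. The 4kB page size (log2 value 12) and the debug print callback and
+ * file handle names are assumptions.
+ *
+ *     if (!MMU_IsVDevAddrValid(psMMUContext, 12, sFaultAddr))
+ *     {
+ *         MMU_CheckFaultAddress(psMMUContext, &sFaultAddr,
+ *                               pfnMyDebugPrintf, pvMyDebugFile);
+ *     }
+ */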
+
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function MMU_ContextDerivePCPDumpSymAddr
+
+@Description Derives a PDump Symbolic address for the top level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Input pszPDumpSymbolicNameBuffer Buffer to write the PDump symbolic
+ address to
+
+@Input uiPDumpSymbolicNameBufferSize Size of the buffer
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+ IMG_CHAR *pszPDumpSymbolicNameBuffer,
+ size_t uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function MMU_PDumpWritePageCatBase
+
+@Description PDump write of the top level MMU object to a device register
+
+@Input psMMUContext MMU context to operate on
+
+@Input pszSpaceName PDump name of the mem/reg space
+
+@Input uiOffset Offset to write the address to
+
+@Input ui32WordSize Word size of the write
+
+@Input ui32AlignShift Alignment shift to apply to the address
+
+@Input ui32Shift Shift to apply to the address
+
+@Input uiPdumpFlags PDump flags for the write
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function MMU_AcquirePDumpMMUContext
+
+@Description Acquire a reference to the PDump MMU context for this MMU
+ context
+
+@Input psMMUContext MMU context to operate on
+
+@Output pui32PDumpMMUContextID PDump MMU context ID
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32PDumpMMUContextID);
+
+/*************************************************************************/ /*!
+@Function MMU_ReleasePDumpMMUContext
+
+@Description Release a reference to the PDump MMU context for this MMU context
+
+@Input psMMUContext MMU context to operate on
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE void
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMMUContext);
+ PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+ PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+ PVR_UNREFERENCED_PARAMETER(ui32Shift);
+ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+}
+#endif /* PDUMP */
+
+
+#endif /* MMU_COMMON_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common linux module setup
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "lists.h"
+#include "power.h"
+#include "env_connection.h"
+#include "process_stats.h"
+#include "module_common.h"
+#include "pvrsrv.h"
+#include "pvr_hwperf.h"
+#include "pvr_drv.h"
+#include <linux/moduleparam.h>
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "km_apphint.h"
+#include "srvinit.h"
+#endif
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+#include "vmm_pvz_server.h"
+#if defined(PVRSRV_GPUVIRT_MULTIDRV_MODEL)
+#include "vmm_pvz_mdm.h"
+#endif
+#endif
+#endif
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+extern IMG_UINT32 gPVRDebugLevel;
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel,
+ "Sets the level of debug output (default 0x7)");
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#if defined(DEBUG)
+extern IMG_UINT32 gPMRAllocFail;
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When the number of PMR allocs reaches"
+ " this value, the alloc will fail (default value is 0, which"
+ " means that the alloc function will behave normally).");
+#endif /* defined(DEBUG) */
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+extern unsigned int gui32RGXLoadTimeDevCount;
+
+extern char *gazRGXBVNCList[PVRSRV_MAX_DEVICES];
+module_param_array_named(RGXBVNC, gazRGXBVNCList, charp, &gui32RGXLoadTimeDevCount, S_IRUGO);
+MODULE_PARM_DESC(RGXBVNC, "Array of comma-separated strings that define the BVNC info of the devices. "
+ "Module parameter usage: RGXBVNC=x.x.x.x,y.y.y.y etc");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+#include "kerneldisplay.h"
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+
+/* Physmem interface (required by LMA DC drivers) */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquire);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapRegionGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapRegionGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
+#endif
+
+/* Host para-virtualization call handlers (required by guest drivers) */
+#if defined(PVRSRV_GPUVIRT_MULTIDRV_MODEL)
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+EXPORT_SYMBOL(PvzServerCreateDevConfig);
+EXPORT_SYMBOL(PvzServerDestroyDevConfig);
+EXPORT_SYMBOL(PvzServerCreateDevConfig2);
+EXPORT_SYMBOL(PvzServerDestroyDevConfig2);
+EXPORT_SYMBOL(PvzServerCreateDevPhysHeaps);
+EXPORT_SYMBOL(PvzServerDestroyDevPhysHeaps);
+EXPORT_SYMBOL(PvzServerMapDevPhysHeap);
+EXPORT_SYMBOL(PvzServerUnmapDevPhysHeap);
+EXPORT_SYMBOL(PvzServerCreateDevPhysHeaps2);
+EXPORT_SYMBOL(PvzServerDestroyDevPhysHeaps2);
+#endif
+#endif
+
+#if !(defined(PVRSRV_GPUVIRT_GUESTDRV) && defined(PVRSRV_GPUVIRT_MULTIDRV_MODEL))
+#include "pvr_notifier.h"
+
+/*
+ * Export some symbols that may be needed by other drivers
+ *
+ * When support for GPU virtualization is present and the multi-driver
+ * model (multiple drivers in the same OS kernel) is being used, only
+ * the host driver is a true device driver (i.e. it is registered with
+ * the kernel to manage the physical device); the other guest drivers
+ * are all modules.
+ */
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+EXPORT_SYMBOL(PVRSRVGetDriverStatus);
+EXPORT_SYMBOL(PVRSRVGetErrorStringKM);
+
+#include "rgxapi_km.h"
+#if defined(SUPPORT_SHARED_SLC) && !defined(PVRSRV_GPUVIRT_GUESTDRV)
+/* Guest drivers do not perform device management so RGXInitSLC is absent */
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireData);
+EXPORT_SYMBOL(RGXHWPerfReleaseData);
+#endif
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+ if (pFile)
+ {
+ struct drm_file *psDRMFile = pFile->private_data;
+
+ return psDRMFile->driver_priv;
+ }
+
+ return NULL;
+}
+
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+ PVR_ASSERT(psEnvConnection != NULL);
+
+ return psEnvConnection->psFile;
+}
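+
+/* Usage sketch (illustrative only): an OS-specific entry point that is handed
+ * a struct file can recover the services connection with
+ * LinuxConnectionFromFile(). The handler below is hypothetical.
+ *
+ *     static long MyIoctlHandler(struct file *pFile, unsigned int uiCmd,
+ *                                unsigned long ulArg)
+ *     {
+ *         CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+ *
+ *         if (psConnection == NULL)
+ *             return -EFAULT;
+ *         ...
+ *     }
+ */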
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDriverInit
+@Description Common one time driver initialisation
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDriverInit(void)
+{
+ PVRSRV_ERROR pvrerr;
+ int error = 0;
+
+#if defined(PDUMP)
+ error = dbgdrv_init();
+ if (error != 0)
+ {
+ return error;
+ }
+#endif
+
+ error = PVRDebugFSInit();
+ if (error != 0)
+ {
+ return error;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ if (PVRSRVStatsInitialise() != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+#endif
+
+ if (PVROSFuncInit() != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+
+ LinuxBridgeInit();
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ error = pvr_apphint_init();
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed AppHint setup(%d)",
+ __func__, error));
+ }
+#endif
+
+ pvrerr = PVRSRVDriverInit();
+ if (pvrerr != PVRSRV_OK)
+ {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDriverDeinit
+@Description Common one time driver de-initialisation
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDriverDeinit(void)
+{
+ PVRSRVDriverDeInit();
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ pvr_apphint_deinit();
+#endif
+
+ LinuxBridgeDeInit();
+
+ PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsDestroy();
+#endif
+ PVRDebugFSDeInit();
+
+#if defined(PDUMP)
+ dbgdrv_cleanup();
+#endif
+}
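+
+/* Usage sketch (illustrative only): the platform/DRM glue is expected to call
+ * the common init/deinit pair exactly once per module load. The wrapper names
+ * below are hypothetical.
+ *
+ *     static int __init MyModuleInit(void)
+ *     {
+ *         return PVRSRVCommonDriverInit();
+ *     }
+ *
+ *     static void __exit MyModuleExit(void)
+ *     {
+ *         PVRSRVCommonDriverDeinit();
+ *     }
+ */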
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceInit
+@Description Common device related initialisation.
+@Input psDeviceNode The device node for which initialisation should be
+ performed
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ int error = 0;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ {
+ PVRSRV_ERROR eError = pvr_sync_init(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+ __func__, eError));
+ return -EBUSY;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psDeviceNode->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode);
+ if (IS_ERR(psDeviceNode->psBufferSyncContext))
+ {
+ error = PTR_ERR(psDeviceNode->psBufferSyncContext);
+ psDeviceNode->psBufferSyncContext = NULL;
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unable to initialise buffer_sync support (%d)",
+ __func__, error));
+ return error;
+ }
+#endif
+
+ error = PVRDebugCreateDebugFSEntries();
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to create default debugfs entries (%d)",
+ __func__, error));
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ error = PVRGpuTraceInit(psDeviceNode);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to initialise PVR GPU Tracing (%d)",
+ __func__, error));
+ }
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ /* register the AppHint device control before device initialisation
+ * so individual AppHints can be configured during the init phase
+ */
+ error = pvr_apphint_device_register(psDeviceNode);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to initialise device AppHints (%d)",
+ __func__, error));
+ }
+#else
+ error = PVRSRVHWperfCreateDebugFs();
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to initialise HWPerf debugfs (%d)",
+ __func__, error));
+ }
+#endif
+
+ /* Initialise the device-dependent bridges */
+
+ error = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Device dependent bridge initialization failed (%d)",
+ __func__, error));
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceDeinit
+@Description Common device related de-initialisation.
+@Input psDeviceNode The device node for which de-initialisation should
+ be performed
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ int error = 0;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ pvr_apphint_device_unregister(psDeviceNode);
+#else
+ PVRSRVHWperfDestroyDebugFs();
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ PVRGpuTraceDeInit(psDeviceNode);
+#endif
+
+ PVRDebugRemoveDebugFSEntries();
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psDeviceNode->psBufferSyncContext);
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ pvr_sync_deinit();
+#endif
+
+ error = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Device dependent bridge deinitialization failed (%d)",
+ __func__, error));
+ }
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceShutdown
+@Description Common device shutdown.
+@Input psDeviceNode The device node representing the device that should
+ be shutdown
+@Return void
+*/ /***************************************************************************/
+
+void PVRSRVCommonDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /*
+ * Take the bridge mutex, and never release it, to stop processes trying to
+ * use the driver after it has been shutdown.
+ */
+ OSAcquireBridgeLock();
+
+ (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_OFF);
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceSuspend
+@Description Common device suspend.
+@Input psDeviceNode The device node representing the device that should
+ be suspended
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /*
+ * OSSetDriverSuspended prevents processes from using the driver while it's
+ * suspended (this is needed for Android). Acquire the bridge lock first to
+ * ensure the driver isn't currently in use.
+ */
+ OSAcquireBridgeLock();
+ OSSetDriverSuspended();
+ OSReleaseBridgeLock();
+
+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_OFF) != PVRSRV_OK)
+ {
+ OSClearDriverSuspended();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceResume
+@Description Common device resume.
+@Input psDeviceNode The device node representing the device that should
+ be resumed
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_ON) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+
+ OSClearDriverSuspended();
+
+ /*
+ * Reprocess the device queues in case commands were blocked during
+ * suspend.
+ */
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+ {
+ PVRSRVCheckStatus(NULL);
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceOpen
+@Description Common device open.
+@Input psDeviceNode The device node representing the device being
+ opened by a user mode process
+@Input psDRMFile The DRM file data that backs the file handle
+ returned to the user mode process
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct drm_file *psDRMFile)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ ENV_CONNECTION_PRIVATE_DATA sPrivData;
+ void *pvConnectionData;
+ PVRSRV_ERROR eError;
+ int iErr = 0;
+
+ OSAcquireBridgeLock();
+
+ if (!psPVRSRVData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+ iErr = -ENODEV;
+ goto e1;
+ }
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+ {
+ eError = PVRSRVDeviceInitialise(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ iErr = -ENODEV;
+ goto e1;
+ }
+ }
+#endif
+
+ sPrivData.psDevNode = psDeviceNode;
+ sPrivData.psFile = psDRMFile->filp;
+
+ /*
+ * Here we pass the file pointer, which will be passed through to our
+ * OSConnectionPrivateDataInit function, where we save it so that we
+ * can back-reference the file structure from its connection.
+ */
+ eError = PVRSRVConnectionConnect(&pvConnectionData, (void *) &sPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ iErr = -ENOMEM;
+ goto e1;
+ }
+
+ psDRMFile->driver_priv = pvConnectionData;
+ OSReleaseBridgeLock();
+
+out:
+ return iErr;
+e1:
+ OSReleaseBridgeLock();
+ goto out;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceRelease
+@Description Common device release.
+@Input psDeviceNode The device node for the device that the given file
+ represents
+@Input psDRMFile The DRM file data that's being released
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct drm_file *psDRMFile)
+{
+ void *pvConnectionData;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ OSAcquireBridgeLock();
+
+ pvConnectionData = psDRMFile->driver_priv;
+ if (pvConnectionData)
+ {
+ PVRSRVConnectionDisconnect(pvConnectionData);
+ psDRMFile->driver_priv = NULL;
+ }
+
+ OSReleaseBridgeLock();
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File module_common.h
+@Title Common linux module setup header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _MODULE_COMMON_H_
+#define _MODULE_COMMON_H_
+
+/* DRVNAME is the name we use to register our driver. */
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+
+struct _PVRSRV_DEVICE_NODE_;
+struct drm_file;
+
+int PVRSRVCommonDriverInit(void);
+void PVRSRVCommonDriverDeinit(void);
+
+int PVRSRVCommonDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVCommonDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+void PVRSRVCommonDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int PVRSRVCommonDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ struct drm_file *psDRMFile);
+void PVRSRVCommonDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ struct drm_file *psDRMFile);
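+
+/* Usage sketch (illustrative only): OS-specific driver glue is expected to
+ * route its power-management callbacks through the helpers above, for example
+ * (the dev_get_drvdata()-based lookup is an assumption):
+ *
+ *     static int MyPmSuspend(struct device *dev)
+ *     {
+ *         struct _PVRSRV_DEVICE_NODE_ *psDeviceNode = dev_get_drvdata(dev);
+ *
+ *         return PVRSRVCommonDeviceSuspend(psDeviceNode);
+ *     }
+ *
+ *     static int MyPmResume(struct device *dev)
+ *     {
+ *         struct _PVRSRV_DEVICE_NODE_ *psDeviceNode = dev_get_drvdata(dev);
+ *
+ *         return PVRSRVCommonDeviceResume(psDeviceNode);
+ *     }
+ */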
+
+#endif /* _MODULE_COMMON_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services initialisation parameters header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services initialisation parameter support for the Linux kernel.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __OS_SRVINIT_PARAM_H__
+#define __OS_SRVINIT_PARAM_H__
+
+#include "km_apphint_defs.h"
+
+#define SrvInitParamOpen() 0
+#define SrvInitParamClose(pvState) ((void)(pvState))
+
+#define SrvInitParamGetBOOL(state, name, value) \
+ pvr_apphint_get_bool(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT32(state, name, value) \
+ pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT64(state, name, value) \
+ pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetSTRING(state, name, buffer, size) \
+ pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size)
+
+#define SrvInitParamGetUINT32BitField(state, name, value) \
+ pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT32List(state, name, value) \
+ pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
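+
+/* Usage sketch (illustrative only): callers name the AppHint without the
+ * APPHINT_ID_ prefix, which the macros paste on. "MyAppHint" below is a
+ * hypothetical entry in km_apphint_defs.h.
+ *
+ *     void *pvState = SrvInitParamOpen();
+ *     IMG_UINT32 ui32Value;
+ *
+ *     SrvInitParamGetUINT32(pvState, MyAppHint, ui32Value);
+ *     SrvInitParamClose(pvState);
+ */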
+
+
+#endif /* __OS_SRVINIT_PARAM_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linux specific per process data functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include <linux/sched.h>
+
+#if defined (SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+ The ion device (the base object for all requests)
+ is created by the system and we acquire it via
+ Linux-specific functions provided by the system layer
+*/
+#include "ion_sys.h"
+#endif
+
+#define SUPPORT_ROCKCHIP_ION (1)
+#if SUPPORT_ROCKCHIP_ION
+struct ion_client *rockchip_ion_client_create(const char *name);
+#endif
+
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+ ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+ ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION)
+ ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+ *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+ if (*phOsPrivateData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+ psEnvConnection->owner = current->tgid;
+
+ /* Save the pointer to our struct file */
+ psEnvConnection->psFile = psPrivData->psFile;
+ psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_ION)
+ psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+ if (psIonConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psEnvConnection->psIonData = psIonConnection;
+ /*
+ We can have more than one connection per process, so we need more than
+ the PID to make the name unique
+ */
+ psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+ OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+#if SUPPORT_ROCKCHIP_ION
+ psEnvConnection->psIonData->psIonClient =
+ rockchip_ion_client_create(psEnvConnection->psIonData->azIonClientName);
+#else
+ psEnvConnection->psIonData->psIonClient =
+ ion_client_create(psEnvConnection->psIonData->psIonDev,
+ psEnvConnection->psIonData->azIonClientName);
+#endif
+ if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+ "ion client for per connection data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psEnvConnection->psIonData->ui32IonClientRefCount = 1;
+#endif /* SUPPORT_ION */
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ if (hOsPrivateData == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION)
+ EnvDataIonClientRelease(psEnvConnection->psIonData);
+#endif
+
+ OSFreeMem(hOsPrivateData);
+ /* Not nulling the pointer; it is a copy on the stack */
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_DEVICE_NODE *OSGetDevData(CONNECTION_DATA *psConnection)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+ PVR_ASSERT(psEnvConnection);
+
+ return psEnvConnection->psDevNode;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description API for OS specific callbacks from server side connection
+ management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _OSCONNECTION_SERVER_H_
+#define _OSCONNECTION_SERVER_H_
+
+#include "handle.h"
+
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection);
+
+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+/*************************************************************************/ /*!
+@Function OSConnectionPrivateDataInit
+@Description Allocates and initialises any OS-specific private data
+ relating to a connection.
+ Called from PVRSRVConnectionConnect().
+@Input pvOSData pointer to any OS private data
+@Output phOsPrivateData handle to the created connection
+ private data
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+ PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+/*************************************************************************/ /*!
+@Function OSConnectionPrivateDataDeInit
+@Description Frees previously allocated OS-specific private data
+ relating to a connection.
+@Input hOsPrivateData handle to the connection private data
+ to be freed
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetDevData)
+#endif
+static INLINE PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return NULL;
+}
+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+
+#endif /* _OSCONNECTION_SERVER_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Environment related functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <linux/spinlock.h>
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
+ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
+ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
+ defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \
+ defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/workqueue.h>
+#endif
+#include <linux/kthread.h>
+#include <asm/atomic.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+#include "log2.h"
+#include "osfunc.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pvr_debugfs.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "pvrsrv_memallocflags.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "physmem_osmem_linux.h"
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+#include "dma_support.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_US (120000000ULL)
+#else
+#if defined(EMULATOR)
+#define EVENT_OBJECT_TIMEOUT_US (2000000ULL)
+#else
+#define EVENT_OBJECT_TIMEOUT_US (100000ULL)
+#endif /* EMULATOR */
+#endif
+
+/*
+ * Main driver lock, used to ensure driver code is single threaded. There are
+ * some places where this lock must not be taken, such as in the mmap related
+ * driver entry points.
+ */
+static DEFINE_MUTEX(gPVRSRVLock);
+
+static void *g_pvBridgeBuffers = NULL;
+static atomic_t g_DriverSuspended;
+
+struct task_struct *BridgeLockGetOwner(void);
+IMG_BOOL BridgeLockIsLocked(void);
+
+
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_CPU_PHYADDR sCpuPAddr;
+ struct page *psPage;
+ IMG_UINT32 ui32Order=0;
+
+ PVR_ASSERT(uiSize != 0);
+ /* Align the size to page granularity */
+ uiSize = PAGE_ALIGN(uiSize);
+
+ /* Get the order to be used for the allocation */
+ ui32Order = get_order(uiSize);
+
+ /* Allocate the pages */
+ psPage = alloc_pages(GFP_KERNEL, ui32Order);
+ if (psPage == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+ psMemHandle->u.pvHandle = psPage;
+ psMemHandle->ui32Order = ui32Order;
+ sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+ /*
+ * Even when multiple pages are allocated for the base MMU object, we still only
+ * need a single physical address because they are physically contiguous.
+ */
+ PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ uiSize,
+ (IMG_UINT64)(uintptr_t) psPage);
+#else
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ psPage,
+ sCpuPAddr,
+ uiSize,
+ NULL);
+#endif
+#endif
+
+ return PVRSRV_OK;
+}
+
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+ struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+ IMG_UINT32 uiSize, uiPageCount=0;
+
+ uiPageCount = (1 << psMemHandle->ui32Order);
+ uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ (IMG_UINT64)(uintptr_t) psPage);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT64)(uintptr_t) psPage);
+#endif
+#endif
+
+ __free_pages(psPage, psMemHandle->ui32Order);
+ psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr)
+{
+ size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->ui32Order);
+ *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+ PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+ PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize);
+#else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+ *pvPtr,
+ sCpuPAddr,
+ actualSize,
+ NULL);
+ }
+#endif
+#endif
+
+ return PVRSRV_OK;
+}
+
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Mapping is done a page at a time */
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, (1 << (PAGE_SHIFT + psMemHandle->ui32Order)));
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, (IMG_UINT64)(uintptr_t)pvPtr);
+#endif
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(pvPtr);
+
+ kunmap((struct page*) psMemHandle->u.pvHandle);
+}
+
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ struct page* psPage = (struct page*) psMemHandle->u.pvHandle;
+
+ void* pvVirtAddrStart = kmap(psPage) + uiOffset;
+ IMG_CPU_PHYADDR sPhysStart, sPhysEnd;
+
+ if (uiLength == 0)
+ {
+ goto e0;
+ }
+
+ if ((uiOffset + uiLength) > ((1 << psMemHandle->ui32Order) * PAGE_SIZE))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid size params, uiOffset %u, uiLength %u",
+ __FUNCTION__,
+ uiOffset,
+ uiLength));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset;
+ sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength;
+
+ OSCleanCPUCacheRangeKM(psDevNode,
+ pvVirtAddrStart,
+ pvVirtAddrStart + uiLength,
+ sPhysStart,
+ sPhysEnd);
+
+e0:
+ kunmap(psPage);
+
+ return eError;
+}
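+
+/* Usage sketch (illustrative only): the helpers above form an
+ * alloc/map/clean/unmap/free life cycle for physically contiguous pages,
+ * typically used for MMU objects. Error handling is omitted for brevity.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvCpuVAddr;
+ *
+ *     OSPhyContigPagesAlloc(psDevNode, PAGE_SIZE, &sMemHandle, &sDevPAddr);
+ *     OSPhyContigPagesMap(psDevNode, &sMemHandle, PAGE_SIZE, &sDevPAddr,
+ *                         &pvCpuVAddr);
+ *     ... write to the allocation through pvCpuVAddr ...
+ *     OSPhyContigPagesClean(psDevNode, &sMemHandle, 0, PAGE_SIZE);
+ *     OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvCpuVAddr);
+ *     OSPhyContigPagesFree(psDevNode, &sMemHandle);
+ */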
+
+#if defined(__GNUC__)
+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8)))
+#define PVRSRV_MEM_ALIGN_MASK (0x7)
+#else
+#error "PVRSRV Alignment macros need to be defined for this compiler"
+#endif
+
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute)
+{
+ IMG_UINT32 uiSize = 0;
+
+ switch(eCacheAttribute)
+ {
+ case PVR_DCACHE_LINE_SIZE:
+ uiSize = cache_line_size();
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d",
+ __FUNCTION__, (IMG_UINT32)eCacheAttribute));
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return uiSize;
+}
+
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
+{
+ va_list argList;
+ IMG_INT32 iCount = 0;
+
+ va_start(argList, pszFormat);
+ iCount = vsscanf(pStr, pszFormat, argList);
+ va_end(argList);
+
+ return iCount;
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
+{
+ return (IMG_INT) memcmp(pvBufA, pvBufB, uiLen);
+}
+
+IMG_CHAR *OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+ return strncpy(pszDest, pszSrc, uSize);
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+ va_list argList;
+ IMG_INT32 iCount;
+
+ va_start(argList, pszFormat);
+ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+ va_end(argList);
+
+ return iCount;
+}
+
+size_t OSStringLength(const IMG_CHAR *pStr)
+{
+ return strlen(pStr);
+}
+
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount)
+{
+ return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2)
+{
+ return strcmp(pStr1, pStr2);
+}
+
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+ size_t uiSize)
+{
+ return strncmp(pStr1, pStr2, uiSize);
+}
+
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+ IMG_UINT32 *ui32Result)
+{
+ if (kstrtou32(pStr, ui32Base, ui32Result) != 0)
+ return PVRSRV_ERROR_CONVERSION_FAILED;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSInitEnvData(void)
+{
+ /* allocate memory for the bridge buffers to be used during an ioctl */
+ g_pvBridgeBuffers = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
+ if (g_pvBridgeBuffers == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ atomic_set(&g_DriverSuspended, 0);
+
+ LinuxInitPhysmem();
+
+ return PVRSRV_OK;
+}
+
+
+void OSDeInitEnvData(void)
+{
+
+ LinuxDeinitPhysmem();
+
+ if (g_pvBridgeBuffers)
+ {
+ /* free-up the memory allocated for bridge buffers */
+ OSFreeMem(g_pvBridgeBuffers);
+ g_pvBridgeBuffers = NULL;
+ }
+}
+
+PVRSRV_ERROR OSGetGlobalBridgeBuffers(void **ppvBridgeInBuffer,
+ void **ppvBridgeOutBuffer)
+{
+ PVR_ASSERT (ppvBridgeInBuffer && ppvBridgeOutBuffer);
+
+ *ppvBridgeInBuffer = g_pvBridgeBuffers;
+ *ppvBridgeOutBuffer = *ppvBridgeInBuffer + PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+ return PVRSRV_OK;
+}
+
+IMG_BOOL OSSetDriverSuspended(void)
+{
+ int suspend_level = atomic_inc_return(&g_DriverSuspended);
+ return (1 != suspend_level)? IMG_FALSE: IMG_TRUE;
+}
+
+IMG_BOOL OSClearDriverSuspended(void)
+{
+ int suspend_level = atomic_dec_return(&g_DriverSuspended);
+ return (0 != suspend_level)? IMG_FALSE: IMG_TRUE;
+}
+
+IMG_BOOL OSGetDriverSuspended(void)
+{
+ return (0 < atomic_read(&g_DriverSuspended))? IMG_TRUE: IMG_FALSE;
+}
+
+void OSReleaseThreadQuanta(void)
+{
+ schedule();
+}
+
+/* This API is deliberately not matched/aligned to the Clockus() API above, to
+ * avoid unnecessary multiplication/division operations in calling code.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+ IMG_UINT64 timenow;
+
+ /* Kernel thread preempt protection. Some architecture implementations of
+ * sched_clock (e.g. ARM) are not preempt safe when the kernel is configured
+ * for preemption, e.g. CONFIG_PREEMPT and similar options.
+ */
+ preempt_disable();
+
+ /* Using sched_clock instead of ktime_get since we need a time stamp that
+ * correlates with that shown in kernel logs and trace data, not one that
+ * is a bit behind. */
+ timenow = sched_clock();
+
+ preempt_enable();
+
+ return timenow;
+}
+
+IMG_UINT64 OSClockns64(void)
+{
+ return Clockns64();
+}
+
+IMG_UINT64 OSClockus64(void)
+{
+ IMG_UINT64 timenow = Clockns64();
+ IMG_UINT32 remainder;
+
+ return OSDivide64r64(timenow, 1000, &remainder);
+}
+
+IMG_UINT32 OSClockus(void)
+{
+ return (IMG_UINT32) OSClockus64();
+}
+
+IMG_UINT32 OSClockms(void)
+{
+ IMG_UINT64 timenow = Clockns64();
+ IMG_UINT32 remainder;
+
+ return OSDivide64(timenow, 1000000, &remainder);
+}
+
+static inline IMG_UINT64 KClockns64(void)
+{
+ ktime_t sTime = ktime_get();
+
+ return sTime.tv64;
+}
+
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time)
+{
+ *pui64Time = KClockns64();
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time)
+{
+ IMG_UINT64 timenow = KClockns64();
+ IMG_UINT32 remainder;
+
+ *pui64Time = OSDivide64r64(timenow, 1000, &remainder);
+ return PVRSRV_OK;
+}
+
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+ return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+
+IMG_UINT64 OSClockMonotonicRawus64(void)
+{
+ IMG_UINT32 rem;
+ return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem);
+}
+
+/*
+ OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
+ udelay(ui32Timeus);
+}
+
+
+/*
+ OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+ msleep(ui32Timems);
+}
+
+
+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void)
+{
+ return (IMG_UINT64)TASK_SIZE;
+}
+
+INLINE IMG_PID OSGetCurrentProcessID(void)
+{
+ if (in_interrupt())
+ {
+ return KERNEL_ID;
+ }
+
+ return (IMG_PID)task_tgid_nr(current);
+}
+
+INLINE IMG_CHAR *OSGetCurrentProcessName(void)
+{
+ return current->comm;
+}
+
+INLINE uintptr_t OSGetCurrentThreadID(void)
+{
+ if (in_interrupt())
+ {
+ return KERNEL_ID;
+ }
+
+ return current->pid;
+}
+
+IMG_PID OSGetCurrentClientProcessIDKM(void)
+{
+ return OSGetCurrentProcessID();
+}
+
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
+{
+ return OSGetCurrentProcessName();
+}
+
+uintptr_t OSGetCurrentClientThreadIDKM(void)
+{
+ return OSGetCurrentThreadID();
+}
+
+size_t OSGetPageSize(void)
+{
+ return PAGE_SIZE;
+}
+
+size_t OSGetPageShift(void)
+{
+ return PAGE_SHIFT;
+}
+
+size_t OSGetPageMask(void)
+{
+ return (OSGetPageSize()-1);
+}
+
+size_t OSGetOrder(size_t uSize)
+{
+ return get_order(PAGE_ALIGN(uSize));
+}
+
+typedef struct
+{
+ int os_error;
+ PVRSRV_ERROR pvr_error;
+} error_map_t;
+
+/* Return negative versions of the POSIX errors, as they are used in this form */
+static const error_map_t asErrorMap[] =
+{
+ {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT},
+ {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL},
+ {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM},
+ {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE},
+ {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM},
+ {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY},
+ {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
+ {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
+ {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+ {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
+
+ {0, PVRSRV_OK}
+};
+
+#define num_rows(a) (sizeof(a)/sizeof(a[0]))
+
+int PVRSRVToNativeError(PVRSRV_ERROR e)
+{
+ int os_error = -EFAULT;
+ int i;
+ for (i = 0; i < num_rows(asErrorMap); i++)
+ {
+ if (e == asErrorMap[i].pvr_error)
+ {
+ os_error = asErrorMap[i].os_error;
+ break;
+ }
+ }
+ return os_error;
+}
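+
+/* Usage sketch (illustrative only): OS entry points that must hand a negative
+ * errno back to the kernel can translate an internal error code as follows
+ * (MyServerCall is hypothetical):
+ *
+ *     PVRSRV_ERROR eError = MyServerCall();
+ *
+ *     if (eError != PVRSRV_OK)
+ *         return PVRSRVToNativeError(eError);
+ */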
+
+#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+typedef struct _MISR_DATA_ {
+ struct workqueue_struct *psWorkQueue;
+ struct work_struct sMISRWork;
+ PFN_MISR pfnMISR;
+ void *hData;
+} MISR_DATA;
+
+/*
+ MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+ MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+ psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+ OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+ void *hData)
+{
+ MISR_DATA *psMISRData;
+
+ psMISRData = OSAllocMem(sizeof(*psMISRData));
+ if (psMISRData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psMISRData->hData = hData;
+ psMISRData->pfnMISR = pfnMISR;
+
+ PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+ psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
+
+ if (psMISRData->psWorkQueue == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethread_workqueue failed"));
+ OSFreeMem(psMISRData);
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+ }
+
+ INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+ *hMISRData = (IMG_HANDLE) psMISRData;
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+ destroy_workqueue(psMISRData->psWorkQueue);
+ OSFreeMem(psMISRData);
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ /*
+ Note:
+
+ In the case of NO_HARDWARE we want the driver to be synchronous so
+ that we don't have to worry about waiting for previous operations
+ to complete
+ */
+#if defined(NO_HARDWARE)
+ psMISRData->pfnMISR(psMISRData->hData);
+ return PVRSRV_OK;
+#else
+ {
+ bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+ return (rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS);
+ }
+#endif
+}
+#else /* defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
+typedef struct _MISR_DATA_ {
+ struct work_struct sMISRWork;
+ PFN_MISR pfnMISR;
+ void *hData;
+} MISR_DATA;
+
+/*
+ MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+ MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+ psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+ OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+ MISR_DATA *psMISRData;
+
+ psMISRData = OSAllocMem(sizeof(*psMISRData));
+ if (psMISRData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psMISRData->hData = hData;
+ psMISRData->pfnMISR = pfnMISR;
+
+ PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+ INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+ *hMISRData = (IMG_HANDLE) psMISRData;
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+ flush_scheduled_work();
+
+ OSFreeMem(psMISRData);
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = hMISRData;
+#if defined(NO_HARDWARE)
+ psMISRData->pfnMISR(psMISRData->hData);
+#else
+ schedule_work(&psMISRData->sMISRWork);
+#endif
+ return PVRSRV_OK;
+}
+
+#else /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+typedef struct _MISR_DATA_ {
+ struct tasklet_struct sMISRTasklet;
+ PFN_MISR pfnMISR;
+ void *hData;
+} MISR_DATA;
+
+/*
+ MISRWrapper
+*/
+static void MISRWrapper(unsigned long data)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) data;
+
+ psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+ OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+ MISR_DATA *psMISRData;
+
+ psMISRData = OSAllocMem(sizeof(*psMISRData));
+ if (psMISRData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psMISRData->hData = hData;
+ psMISRData->pfnMISR = pfnMISR;
+
+ PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+ tasklet_init(&psMISRData->sMISRTasklet, MISRWrapper, (unsigned long)psMISRData);
+
+ *hMISRData = (IMG_HANDLE) psMISRData;
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+ tasklet_kill(&psMISRData->sMISRTasklet);
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+#if defined(NO_HARDWARE)
+ psMISRData->pfnMISR(psMISRData->hData);
+#else
+ tasklet_schedule(&psMISRData->sMISRTasklet);
+#endif
+ return PVRSRV_OK;
+}
+
+#endif /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+#endif /* #if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
+
+/* OS specific values for thread priority */
+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
+{
+ -20, /* OS_THREAD_HIGHEST_PRIORITY */
+ -10, /* OS_THREAD_HIGH_PRIORITY */
+ 0, /* OS_THREAD_NORMAL_PRIORITY */
+ 9, /* OS_THREAD_LOW_PRIORITY */
+ 19, /* OS_THREAD_LOWEST_PRIORITY */
+ -22, /* OS_THREAD_NOSET_PRIORITY */
+};
+
+typedef struct {
+ struct task_struct *kthread;
+ PFN_THREAD pfnThread;
+ void *hData;
+ OS_THREAD_LEVEL eThreadPriority;
+} OSThreadData;
+
+static int OSThreadRun(void *data)
+{
+ OSThreadData *psOSThreadData = data;
+
+ /* If a valid priority level was requested, set the nice value for the new thread */
+ if (psOSThreadData->eThreadPriority != OS_THREAD_NOSET_PRIORITY &&
+ psOSThreadData->eThreadPriority < OS_THREAD_LAST_PRIORITY)
+ set_user_nice(current, ai32OSPriorityValues[psOSThreadData->eThreadPriority]);
+
+ /* Call the client's kernel thread with the client's data pointer */
+ psOSThreadData->pfnThread(psOSThreadData->hData);
+
+ /* Wait for OSThreadDestroy() to call kthread_stop() */
+ while (!kthread_should_stop())
+ {
+ schedule();
+ }
+
+ return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ void *hData)
+{
+ return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, hData, OS_THREAD_NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ void *hData,
+ OS_THREAD_LEVEL eThreadPriority)
+{
+ OSThreadData *psOSThreadData;
+ PVRSRV_ERROR eError;
+
+ psOSThreadData = OSAllocMem(sizeof(*psOSThreadData));
+ if (psOSThreadData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psOSThreadData->pfnThread = pfnThread;
+ psOSThreadData->hData = hData;
+ psOSThreadData->eThreadPriority = eThreadPriority;
+ psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, pszThreadName);
+
+ if (IS_ERR(psOSThreadData->kthread))
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_kthread;
+ }
+
+ *phThread = psOSThreadData;
+
+ return PVRSRV_OK;
+
+fail_kthread:
+ OSFreeMem(psOSThreadData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+ OSThreadData *psOSThreadData = hThread;
+ int ret;
+
+ /* Let the thread know we are ready for it to end and wait for it. */
+ ret = kthread_stop(psOSThreadData->kthread);
+ if (0 != ret)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ OSFreeMem(psOSThreadData);
+
+ return PVRSRV_OK;
+}
+
+void OSPanic(void)
+{
+ BUG();
+
+#if defined(__KLOCWORK__)
+ /* Klocworks does not understand that BUG is terminal... */
+ abort();
+#endif
+}
+
+PVRSRV_ERROR OSSetThreadPriority(IMG_HANDLE hThread,
+ IMG_UINT32 nThreadPriority,
+ IMG_UINT32 nThreadWeight)
+{
+ PVR_UNREFERENCED_PARAMETER(hThread);
+ PVR_UNREFERENCED_PARAMETER(nThreadPriority);
+ PVR_UNREFERENCED_PARAMETER(nThreadWeight);
+ /* Default priorities used on this platform */
+
+ return PVRSRV_OK;
+}
+
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+ size_t ui32Bytes,
+ IMG_UINT32 ui32MappingFlags)
+{
+ void *pvLinAddr;
+
+ if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+ {
+ PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu");
+ return NULL;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /*
+ * This is required to support DMA physheaps for GPU virtualization.
+ * Unfortunately, if a region of kernel-managed memory is turned into
+ * a DMA buffer, conflicting mappings can easily arise on Linux: the
+ * original memory is mapped by the kernel as normal cached memory,
+ * whilst DMA buffers are mostly mapped as uncached device or
+ * cache-coherent device memory. Either way the system ends up with
+ * two conflicting mappings of the same memory region, which is
+ * undefined behaviour on most processors, notably ARMv6 onwards and
+ * some x86 micro-architectures.
+ *
+ * As a result, for DMA physheap allocations we perform the
+ * ioremapping manually, translating between CPU/VA and BUS/PA.
+ */
+ pvLinAddr = SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes);
+ if (pvLinAddr != NULL)
+ {
+ return pvLinAddr;
+ }
+#endif
+
+ switch (ui32MappingFlags)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ pvLinAddr = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ pvLinAddr = (void *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+ pvLinAddr = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+#endif
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+ pvLinAddr = (void *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+ pvLinAddr = (void *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+ PVR_ASSERT(!"Unexpected cpu cache mode");
+ pvLinAddr = NULL;
+ break;
+ default:
+ PVR_ASSERT(!"Unsupported cpu cache mode");
+ pvLinAddr = NULL;
+ break;
+ }
+
+ return pvLinAddr;
+}
+
+
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32MappingFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+ if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+ {
+ PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from the cpu");
+ return IMG_FALSE;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
+ {
+ return IMG_TRUE;
+ }
+#endif
+
+ iounmap(pvLinAddr);
+
+ return IMG_TRUE;
+}
+
+#define OS_MAX_TIMERS 8
+
+/* Timer callback structure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+ IMG_BOOL bInUse;
+ PFN_TIMER_FUNC pfnTimerFunc;
+ void *pvData;
+ struct timer_list sTimer;
+ IMG_UINT32 ui32Delay;
+ IMG_BOOL bActive;
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ struct work_struct sWork;
+#endif
+} TIMER_CALLBACK_DATA;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+static struct workqueue_struct *psTimerWorkQueue;
+#endif
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static DEFINE_MUTEX(sTimerStructLock);
+#else
+/* The lock is used to control access to sTimers */
+static DEFINE_SPINLOCK(sTimerStructLock);
+#endif
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+ if (!psTimerCBData->bActive)
+ return;
+
+ /* call timer callback */
+ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+ /* reset timer */
+ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
+}
+
+
+/*************************************************************************/ /*!
+@Function OSTimerCallbackWrapper
+@Description OS specific timer callback wrapper function
+@Input uData Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(uintptr_t uData)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ int res;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+#else
+ res = schedule_work(&psTimerCBData->sWork);
+#endif
+ if (res == 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+ }
+#else
+ OSTimerCallbackBody(psTimerCBData);
+#endif
+}
+
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+ OSTimerCallbackBody(psTimerCBData);
+}
+#endif
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData;
+ IMG_UINT32 ui32i;
+#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE))
+ unsigned long ulLockFlags;
+#endif
+
+ /* check callback */
+ if(!pfnTimerFunc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+ return NULL;
+ }
+
+ /* Allocate timer callback data structure */
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ mutex_lock(&sTimerStructLock);
+#else
+ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
+#endif
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ psTimerCBData = &sTimers[ui32i];
+ if (!psTimerCBData->bInUse)
+ {
+ psTimerCBData->bInUse = IMG_TRUE;
+ break;
+ }
+ }
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ mutex_unlock(&sTimerStructLock);
+#else
+ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
+#endif
+ if (ui32i >= OS_MAX_TIMERS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+ return NULL;
+ }
+
+ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+ psTimerCBData->pvData = pvData;
+ psTimerCBData->bActive = IMG_FALSE;
+
+ /*
+ HZ = ticks per second
+ ui32MsTimeout = required ms delay
+ ticks = (Hz * ui32MsTimeout) / 1000
+ */
+ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+ ? 1
+ : ((HZ * ui32MsTimeout) / 1000);
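+ /* Worked example: with HZ = 250 and ui32MsTimeout = 100 the delay is
+ * (250 * 100) / 1000 = 25 jiffies; any request shorter than one tick
+ * (HZ * ui32MsTimeout < 1000) is rounded up to a single jiffy. */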
+ /* initialise object */
+ init_timer(&psTimerCBData->sTimer);
+
+ /* setup timer object */
+ psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
+ psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData;
+
+ return (IMG_HANDLE)(uintptr_t)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+ IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1;
+
+ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+ return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+ /* free timer callback data struct */
+ psTimerCBData->bInUse = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+ /* Start timer arming */
+ psTimerCBData->bActive = IMG_TRUE;
+
+ /* set the expire time */
+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+ /* Add the timer to the list */
+ add_timer(&psTimerCBData->sTimer);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(psTimerCBData->bActive);
+
+ /* Stop timer from arming */
+ psTimerCBData->bActive = IMG_FALSE;
+ smp_mb();
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ flush_scheduled_work();
+#endif
+
+ /* remove timer */
+ del_timer_sync(&psTimerCBData->sTimer);
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ /*
+ * This second flush is to catch the case where the timer ran
+ * before we managed to delete it, in which case, it will have
+ * queued more work for the workqueue. Since the bActive flag
+ * has been cleared, this second flush won't result in the
+ * timer being rearmed.
+ */
+ flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ flush_scheduled_work();
+#endif
+
+ return PVRSRV_OK;
+}
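+
+/*
+ * Illustrative timer usage, based on the implementation above
+ * ("MyTimerFunc" and "pvData" are hypothetical):
+ *
+ *     IMG_HANDLE hTimer = OSAddTimer(MyTimerFunc, pvData, 100);
+ *     OSEnableTimer(hTimer);   (MyTimerFunc now runs roughly every 100 ms,
+ *                               since the callback body re-arms the timer)
+ *     ...
+ *     OSDisableTimer(hTimer);  (timers must be disabled before removal)
+ *     OSRemoveTimer(hTimer);
+ */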
+
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_UNREFERENCED_PARAMETER(pszName);
+
+ if(hEventObject)
+ {
+ if(LinuxEventObjectListCreate(hEventObject) != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hEventObject)
+ {
+ LinuxEventObjectListDestroy(hEventObject);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+ IMG_UINT64 uiTimeoutus,
+ IMG_BOOL bHoldBridgeLock)
+{
+ PVRSRV_ERROR eError;
+
+ if(hOSEventKM && uiTimeoutus > 0)
+ {
+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, bHoldBridgeLock);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EventObjectWaitTimeout: invalid arguments %p, %llu", hOSEventKM, (unsigned long long)uiTimeoutus));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_FALSE);
+}
+
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+ return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_TRUE);
+}
+
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM)
+{
+ return OSEventObjectWaitTimeoutAndHoldBridgeLock(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+ IMG_HANDLE *phOSEvent)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hEventObject)
+ {
+ if(LinuxEventObjectAdd(hEventObject, phOSEvent) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectOpen: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hOSEventKM)
+ {
+ if(LinuxEventObjectDelete(hOSEventKM) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose: hOSEventKM is not a valid handle"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+ PVRSRV_ERROR eError;
+
+ if(hEventObject)
+ {
+ eError = LinuxEventObjectSignal(hEventObject);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hEventObject is not a valid handle"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
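+
+/*
+ * A brief, illustrative summary of the expected event object lifecycle,
+ * inferred from the wrappers above (the authoritative behaviour lives in
+ * the LinuxEventObject* implementation; "my_event" is a hypothetical name):
+ *
+ *     IMG_HANDLE hEventList, hOSEvent;
+ *     OSEventObjectCreate("my_event", &hEventList);  (creator)
+ *     OSEventObjectOpen(hEventList, &hOSEvent);      (each waiter)
+ *     OSEventObjectWait(hOSEvent);                   (blocks until signalled
+ *                                                     or EVENT_OBJECT_TIMEOUT_US)
+ *     OSEventObjectSignal(hEventList);               (signaller)
+ *     OSEventObjectClose(hOSEvent);                  (each waiter)
+ *     OSEventObjectDestroy(hEventList);              (creator)
+ */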
+
+IMG_BOOL OSProcHasPrivSrvInit(void)
+{
+ return capable(CAP_SYS_ADMIN) != 0;
+}
+
+PVRSRV_ERROR OSCopyToUser(void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, void *pvUserPtr, size_t ui32Bytes)
+{
+ IMG_INT linuxType;
+
+ if (eVerification == PVR_VERIFY_READ)
+ {
+ linuxType = VERIFY_READ;
+ }
+ else
+ {
+ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
+ linuxType = VERIFY_WRITE;
+ }
+
+ return access_ok(linuxType, pvUserPtr, ui32Bytes);
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+ *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+ return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+ *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+ return (IMG_UINT32) ui64Divident;
+}
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ {
+ PVR_ASSERT(!psTimerWorkQueue);
+
+ psTimerWorkQueue = create_workqueue("pvr_timer");
+ if (psTimerWorkQueue == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+ }
+ }
+#endif
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ {
+ IMG_UINT32 ui32i;
+
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+ }
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ if (psTimerWorkQueue != NULL)
+ {
+ destroy_workqueue(psTimerWorkQueue);
+ psTimerWorkQueue = NULL;
+ }
+#endif
+}
+
+void OSDumpStack(void)
+{
+ dump_stack();
+}
+
+static struct task_struct *gsOwner;
+
+void OSAcquireBridgeLock(void)
+{
+ mutex_lock(&gPVRSRVLock);
+ gsOwner = current;
+}
+
+void OSReleaseBridgeLock(void)
+{
+ gsOwner = NULL;
+ mutex_unlock(&gPVRSRVLock);
+}
+
+struct task_struct *BridgeLockGetOwner(void)
+{
+ return gsOwner;
+}
+
+IMG_BOOL BridgeLockIsLocked(void)
+{
+ return OSLockIsLocked(&gPVRSRVLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticEntry
+@Description Create a statistic entry in the specified folder.
+@Input pszName String containing the name for the entry.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Input pfnIncMemRefCt Pointer to function that can be used to take a
+ reference on the memory backing the statistic
+ entry.
+@Input pfnDecMemRefCt Pointer to function that can be used to drop a
+ reference on the memory backing the statistic
+ entry.
+@Input pvData OS specific reference that can be used by
+ pfnStatsPrint.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint,
+ OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+ OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+ void *pvData)
+{
+ return (void *)PVRDebugFSCreateStatisticEntry(pszName, (PVR_DEBUGFS_DIR_DATA *)pvFolder, pfnStatsPrint, pfnIncMemRefCt, pfnDecMemRefCt, pvData);
+} /* OSCreateStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticEntry
+@Description Removes a statistic entry.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry)
+{
+ PVRDebugFSRemoveStatisticEntry((PVR_DEBUGFS_DRIVER_STAT *)pvEntry);
+} /* OSRemoveStatisticEntry */
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+ return (void *) PVRDebugFSCreateRawStatisticEntry(pszFileName, pvParentDir,
+ pfStatsPrint);
+}
+
+void OSRemoveRawStatisticEntry(void *pvEntry)
+{
+ PVRDebugFSRemoveRawStatisticEntry(pvEntry);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticFolder
+@Description Create a statistic folder to hold statistic entries.
+@Input pszName String containing the name for the folder.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the folder
+ to create the folder in, or NULL for the root.
+@Return Pointer void reference to the folder created, which can be
+ passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder)
+{
+ PVR_DEBUGFS_DIR_DATA *psNewStatFolder = NULL;
+ int iResult;
+
+ iResult = PVRDebugFSCreateEntryDir(pszName, (PVR_DEBUGFS_DIR_DATA *)pvFolder, &psNewStatFolder);
+ return (iResult == 0) ? (void *)psNewStatFolder : NULL;
+} /* OSCreateStatisticFolder */
+
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticFolder
+@Description Removes a statistic folder.
+@Input ppvFolder Reference from OSCreateStatisticFolder() of the
+ folder that should be removed.
+ This needs to be double pointer because it has to
+ be NULLed right after memory is freed to avoid
+ possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder)
+{
+ PVRDebugFSRemoveEntryDir((PVR_DEBUGFS_DIR_DATA **)ppvFolder);
+} /* OSRemoveStatisticFolder */
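+
+/*
+ * A minimal usage sketch for the statistic helpers above (illustrative
+ * only; "MyStatsPrint" is hypothetical and is assumed to match the
+ * OS_STATS_PRINT_FUNC signature, and NULL is assumed to be acceptable
+ * for the optional memory refcount callbacks):
+ *
+ *     void *pvFolder = OSCreateStatisticFolder("my_driver", NULL);
+ *     void *pvEntry  = OSCreateStatisticEntry("my_stats", pvFolder,
+ *                                             MyStatsPrint, NULL, NULL,
+ *                                             NULL);
+ *     ...
+ *     OSRemoveStatisticEntry(pvEntry);
+ *     OSRemoveStatisticFolder(&pvFolder);
+ *
+ * OSRemoveStatisticFolder() takes the address of the folder reference so
+ * that it can be NULLed as soon as it is freed (see the comment above).
+ */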
+
+
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_CPU_PHYADDR sCpuPAHeapBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bIsLMA)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ IMG_UINT64 uiPFN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+ PVRSRV_ERROR eError;
+
+ struct mm_struct *psMM = current->mm;
+ struct vm_area_struct *psVMA = NULL;
+ struct address_space *psMapping = NULL;
+ struct page *psPage = NULL;
+
+ IMG_UINT64 uiCPUVirtAddr = 0;
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32PageSize = OSGetPageSize();
+ IMG_BOOL bMixedMap = IMG_FALSE;
+
+ /*
+ * Acquire the lock before manipulating the VMA.
+ * Holding mmap_sem alone is sufficient here, as the pages associated
+ * with this VMA are never meant to be swapped out.
+ *
+ * Should these pages ever become swappable, page_table_lock would
+ * also need to be held to prevent them being swapped while mapped.
+ */
+
+ /* Find the Virtual Memory Area associated with the user base address */
+ psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase);
+ if (NULL == psVMA)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND;
+ return eError;
+ }
+
+ /* Acquire the memory sem */
+ down_write(&psMM->mmap_sem);
+
+ psMapping = psVMA->vm_file->f_mapping;
+
+ /* Restore the page offset to the correct value, as it is modified by the MMAP_PMR handler */
+ psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT);
+
+ /* Delete the entries for the pages that got freed */
+ if (ui32FreePageCount && (pai32FreeIndices != NULL))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize));
+
+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ /*
+ * Still need to map pages in case remap flag is set.
+ * That is not done until the remap case succeeds
+ */
+#endif
+ }
+ eError = PVRSRV_OK;
+ }
+
+ if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA)
+ {
+ psVMA->vm_flags |= VM_MIXEDMAP;
+ bMixedMap = IMG_TRUE;
+ }
+ else
+ {
+ if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+
+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = page_to_pfn_t(psPage);
+
+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+ uiPFN = page_to_pfn(psPage);
+
+ if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ {
+ bMixedMap = IMG_TRUE;
+ psVMA->vm_flags |= VM_MIXEDMAP;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Map the pages that got allocated */
+ if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ int err;
+
+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize));
+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+ if (bIsLMA)
+ {
+ phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr +
+ ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(uiAddr, 0);
+ psPage = pfn_t_to_page(sPFN);
+#else
+ uiPFN = uiAddr >> PAGE_SHIFT;
+ psPage = pfn_to_page(uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+ else
+ {
+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = page_to_pfn_t(psPage);
+#else
+ uiPFN = page_to_pfn(psPage);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+
+ if (bMixedMap)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+#else
+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+ else
+ {
+ err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage);
+ }
+
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err));
+ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+ goto eFailed;
+ }
+ }
+ }
+
+ eError = PVRSRV_OK;
+ eFailed:
+ up_write(&psMM->mmap_sem);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function OSDebugSignalPID
+@Description Sends a SIGTRAP signal to a specific PID in user mode for
+ debugging purposes. The user mode process can register a handler
+ against this signal.
+ This is necessary to support the Rogue debugger. If the Rogue
+ debugger is not used then this function may be implemented as
+ a stub.
+@Input ui32PID The PID for the signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID)
+{
+ int err;
+ struct pid *psPID;
+
+ psPID = find_vpid(ui32PID);
+ if (psPID == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__));
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+
+ err = kill_pid(psPID, SIGTRAP, 0);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err));
+ return PVRSRV_ERROR_SIGNAL_FAILED;
+ }
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title OS functions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS specific API definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG 1
+#endif
+
+#ifndef __OSFUNC_H__
+#define __OSFUNC_H__
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+#include <asm/io.h>
+#endif
+
+#if defined(__QNXNTO__)
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "device.h"
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+#define KERNEL_ID 0xffffffffL
+#define ISR_ID 0xfffffffdL
+
+/*************************************************************************/ /*!
+@Function OSClockns64
+@Description This function returns the number of ticks since system boot
+ expressed in nanoseconds. Unlike OSClockns, OSClockns64 has
+ a near 64-bit range.
+@Return The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void);
+
+/*************************************************************************/ /*!
+@Function OSClockus64
+@Description This function returns the number of ticks since system boot
+ expressed in microseconds. Unlike OSClockus, OSClockus64 has
+ a near 64-bit range.
+@Return The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void);
+
+/*************************************************************************/ /*!
+@Function OSClockus
+@Description This function returns the number of ticks since system boot
+ in microseconds.
+@Return The 32-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockus(void);
+
+/*************************************************************************/ /*!
+@Function OSClockms
+@Description This function returns the number of ticks since system boot
+ in milliseconds.
+@Return The 32-bit clock value, in milliseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockms(void);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicns64
+@Description This function returns a clock value based on the system
+ monotonic clock.
+@Output pui64Time The 64-bit clock value, in nanoseconds.
+@Return Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicus64
+@Description This function returns a clock value based on the system
+ monotonic clock.
+@Output pui64Time The 64-bit clock value, in microseconds.
+@Return Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicRawns64
+@Description This function returns a clock value based on the system
+ monotonic raw clock.
+@Return 64bit ns timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawns64(void);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicRawus64
+@Description This function returns a clock value based on the system
+ monotonic raw clock.
+@Return 64bit us timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawus64(void);
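+
+/*
+ * A minimal sketch of how the clock functions above might be used to time
+ * an operation (illustrative only; "DoWork" is a hypothetical function):
+ *
+ *     IMG_UINT64 ui64Start = OSClockus64();
+ *     DoWork();
+ *     PVR_DPF((PVR_DBG_MESSAGE, "DoWork took %llu us",
+ *              (unsigned long long)(OSClockus64() - ui64Start)));
+ */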
+
+/*************************************************************************/ /*!
+@Function OSGetPageSize
+@Description This function returns the page size.
+ If the OS is not using memory mappings it should return a
+ default value of 4096.
+@Return The size of a page, in bytes.
+*/ /**************************************************************************/
+size_t OSGetPageSize(void);
+
+/*************************************************************************/ /*!
+@Function OSGetPageShift
+@Description This function returns the page size expressed as a power
+ of two. A number of pages, left-shifted by this value, gives
+ the equivalent size in bytes.
+ If the OS is not using memory mappings it should return a
+ default value of 12.
+@Return The page size expressed as a power of two.
+*/ /**************************************************************************/
+size_t OSGetPageShift(void);
+
+/*************************************************************************/ /*!
+@Function OSGetPageMask
+@Description This function returns a bitmask covering the in-page offset
+ bits of an address (i.e. page size minus one). ANDing an
+ address with the complement of this mask yields the start
+ address of the page containing that address.
+@Return The page mask.
+*/ /**************************************************************************/
+size_t OSGetPageMask(void);
+
+/*************************************************************************/ /*!
+@Function OSGetOrder
+@Description This function returns the page allocation order for a given
+ size, i.e. the power of two number of pages needed to hold
+ uSize bytes (the value returned by get_order() on Linux).
+ E.g. with 4 KB pages, a uSize of up to 4096 bytes gives
+ order 0 and a uSize of 8192 bytes gives order 1.
+@Input uSize The size in bytes.
+@Return The allocation order (power of two number of pages).
+*/ /**************************************************************************/
+size_t OSGetOrder(size_t uSize);
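+
+/*
+ * For example, on a Linux system with 4 KB pages: OSGetPageSize() returns
+ * 4096, OSGetPageShift() returns 12, OSGetPageMask() returns 0xFFF and
+ * OSGetOrder(8192) returns 1 (two pages). The start of the page containing
+ * an address can be obtained with (uiAddr & ~OSGetPageMask()).
+ */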
+
+typedef void (*PFN_MISR)(void *pvData);
+typedef void (*PFN_THREAD)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function OSChangeSparseMemCPUAddrMap
+@Description This function changes the CPU mapping of the underlying
+ sparse allocation. It is used by a PMR 'factory'
+ implementation if that factory supports sparse
+ allocations.
+@Input psPageArray array representing the pages in the
+ sparse allocation
+@Input sCpuVAddrBase the virtual base address of the sparse
+ allocation ('first' page)
+@Input sCpuPAHeapBase the physical address of the virtual
+ base address 'sCpuVAddrBase'
+@Input ui32AllocPageCount the number of pages referenced in
+ 'pai32AllocIndices'
+@Input pai32AllocIndices list of indices of pages within
+ 'psPageArray' that we now want to
+ allocate and map
+@Input ui32FreePageCount the number of pages referenced in
+ 'pai32FreeIndices'
+@Input pai32FreeIndices list of indices of pages within
+ 'psPageArray' we now want to
+ unmap and free
+@Input bIsLMA flag indicating if the sparse allocation
+ is from LMA or UMA memory
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_CPU_PHYADDR sCpuPAHeapBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bIsLMA);
+
+/*************************************************************************/ /*!
+@Function OSInstallMISR
+@Description Installs a Mid-level Interrupt Service Routine (MISR)
+ which handles higher-level processing of interrupts from
+ the device (GPU).
+ An MISR runs outside of interrupt context, and so may be
+ descheduled. This means it can contain code that would
+ not be permitted in the LISR.
+ An MISR is invoked when OSScheduleMISR() is called. This
+ call should be made by the installed LISR once it has completed
+ its interrupt processing.
+ Multiple MISRs may be installed by the driver to handle
+ different causes of interrupt.
+@Input pfnMISR pointer to the function to be installed
+ as the MISR
+@Input hData private data provided to the MISR
+@Output hMISRData handle to the installed MISR (to be used
+ for a subsequent uninstall)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+ PFN_MISR pfnMISR,
+ void *hData);
+
+/*************************************************************************/ /*!
+@Function OSUninstallMISR
+@Description Uninstalls a Mid-level Interrupt Service Routine (MISR).
+@Input hMISRData handle to the installed MISR
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Function OSScheduleMISR
+@Description Schedules a Mid-level Interrupt Service Routine (MISR) to be
+ executed. An MISR should be executed outside of interrupt
+ context, for example in a work queue.
+@Input hMISRData handle to the installed MISR
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
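+
+/*
+ * A minimal usage sketch for the MISR functions above (illustrative only;
+ * "DeviceMISR", "psDevData" and "hMISR" are hypothetical):
+ *
+ *     static void DeviceMISR(void *pvData)
+ *     {
+ *         ... higher-level interrupt processing, may sleep ...
+ *     }
+ *
+ *     IMG_HANDLE hMISR;
+ *     eError = OSInstallMISR(&hMISR, DeviceMISR, psDevData);
+ *
+ *     OSScheduleMISR(hMISR);    (called from the installed LISR once its
+ *                                own processing is complete)
+ *
+ *     OSUninstallMISR(hMISR);   (at device tear-down)
+ */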
+
+
+/*************************************************************************/ /*!
+@Function OSThreadCreate
+@Description Creates a kernel thread and starts it running. The caller
+ is responsible for informing the thread that it must finish
+ and return from the pfnThread function. It is not possible
+ to kill or terminate the thread. The new thread runs with
+ the default priority provided by the Operating System.
+@Output phThread Returned handle to the thread.
+@Input pszThreadName Name to assign to the thread.
+@Input pfnThread Thread entry point function.
+@Input hData Thread specific data pointer for pfnThread().
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ void *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+ OS_THREAD_HIGHEST_PRIORITY = 0,
+ OS_THREAD_HIGH_PRIORITY,
+ OS_THREAD_NORMAL_PRIORITY,
+ OS_THREAD_LOW_PRIORITY,
+ OS_THREAD_LOWEST_PRIORITY,
+ OS_THREAD_NOSET_PRIORITY, /* With this option the priority level is the default for the given OS */
+ OS_THREAD_LAST_PRIORITY /* This must always be the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function OSThreadCreatePriority
+@Description Like OSThreadCreate, this function creates a kernel thread
+ and starts it running. The difference is that this function
+ makes it possible to specify the priority used to schedule
+ the new thread.
+
+@Output phThread Returned handle to the thread.
+@Input pszThreadName Name to assign to the thread.
+@Input pfnThread Thread entry point function.
+@Input hData Thread specific data pointer for pfnThread().
+@Input eThreadPriority Priority level to assign to the new thread.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ void *hData,
+ OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function OSThreadDestroy
+@Description Waits for the thread to end and then destroys the thread
+ handle memory. This function will block and wait for the
+ thread to finish successfully, thereby providing a sync point
+ for the thread completing its work. No attempt is made to kill
+ or otherwise terminate the thread.
+@Input hThread The thread handle returned by OSThreadCreate().
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
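+
+/*
+ * A minimal usage sketch for the thread functions above (illustrative
+ * only; "gbStop" and "MyThread" are hypothetical, and a real caller would
+ * typically use a proper synchronisation object rather than a bare flag):
+ *
+ *     static volatile IMG_BOOL gbStop = IMG_FALSE;
+ *
+ *     static void MyThread(void *pvData)
+ *     {
+ *         while (!gbStop)
+ *         {
+ *             ... do work, sleep or wait on an event object ...
+ *         }
+ *     }
+ *
+ *     IMG_HANDLE hThread;
+ *     eError = OSThreadCreatePriority(&hThread, "my_thread", MyThread,
+ *                                     NULL, OS_THREAD_NORMAL_PRIORITY);
+ *
+ *     gbStop = IMG_TRUE;                  (tell the thread to return)
+ *     eError = OSThreadDestroy(hThread);  (wait for it and free the handle)
+ */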
+
+/*************************************************************************/ /*!
+@Function OSSetThreadPriority
+@Description Set the priority and weight of a thread
+@Input hThread The thread handle.
+@Input nThreadPriority The integer value of the thread priority
+@Input nThreadWeight The integer value of the thread weight
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSSetThreadPriority( IMG_HANDLE hThread,
+ IMG_UINT32 nThreadPriority,
+ IMG_UINT32 nThreadWeight);
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* Workarounds for assumptions made that memory will not be mapped uncached
+ * in kernel or user address spaces on arm64 platforms (or other testing).
+ */
+
+/**************************************************************************/ /*!
+@Function DeviceMemSet
+@Description Set memory, whose mapping may be uncached, to a given value.
+ On some architectures, additional processing may be needed
+ if the mapping is uncached. In such cases, OSDeviceMemSet()
+ is defined as a call to this function.
+@Input pvDest void pointer to the memory to be set
+@Input ui8Value byte containing the value to be set
+@Input ui32Size the number of bytes to be set to the given value
+@Return None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function DeviceMemCopy
+@Description Copy values from one area of memory, to another, when one
+ or both mappings may be uncached.
+ On some architectures, additional processing may be needed
+ if mappings are uncached. In such cases, OSDeviceMemCopy()
+ is defined as a call to this function.
+@Input pvDst void pointer to the destination memory
+@Input pvSrc void pointer to the source memory
+@Input ui32Size the number of bytes to be copied
+@Return None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c))
+#define OSCachedMemSet(a,b,c) memset((a), (b), (c))
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#else /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/* Everything else */
+
+/**************************************************************************/ /*!
+@Function OSDeviceMemSet
+@Description Set memory, whose mapping may be uncached, to a given value.
+ On some architectures, additional processing may be needed
+ if the mapping is uncached.
+@Input a void pointer to the memory to be set
+@Input b byte containing the value to be set
+@Input c the number of bytes to be set to the given value
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSDeviceMemCopy
+@Description Copy values from one area of memory, to another, when one
+ or both mappings may be uncached.
+ On some architectures, additional processing may be needed
+ if mappings are uncached.
+@Input a void pointer to the destination memory
+@Input b void pointer to the source memory
+@Input c the number of bytes to be copied
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSCachedMemSet
+@Description Set memory, where the mapping is known to be cached, to a
+ given value. This function exists to allow an optimal memset
+ to be performed when memory is known to be cached.
+@Input a void pointer to the memory to be set
+@Input b byte containing the value to be set
+@Input c the number of bytes to be set to the given value
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSCachedMemCopy
+@Description Copy values from one area of memory, to another, when both
+ mappings are known to be cached.
+ This function exists to allow an optimal memcpy to be
+ performed when memory is known to be cached.
+@Input a void pointer to the destination memory
+@Input b void pointer to the source memory
+@Input c the number of bytes to be copied
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#endif /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/**************************************************************************/ /*!
+@Function OSMapPhysToLin
+@Description Maps physical memory into a linear address range.
+@Input BasePAddr physical CPU address
+@Input ui32Bytes number of bytes to be mapped
+@Input ui32Flags flags denoting the caching mode to be employed
+ for the mapping (uncached/write-combined,
+ cached coherent or cached incoherent).
+ See pvrsrv_memallocflags.h for full flag bit
+ definitions.
+@Return Pointer to the new mapping if successful, NULL otherwise.
+ */ /**************************************************************************/
+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function OSUnMapPhysToLin
+@Description Unmaps physical memory previously mapped by OSMapPhysToLin().
+@Input pvLinAddr the linear mapping to be unmapped
+@Input ui32Bytes number of bytes to be unmapped
+@Input ui32Flags flags denoting the caching mode that was employed
+ for the original mapping.
+@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
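+
+/*
+ * A minimal usage sketch for the mapping functions above (illustrative
+ * only; the physical address and 0x1000 size are hypothetical values for
+ * a small register bank):
+ *
+ *     IMG_CPU_PHYADDR sRegsPAddr;
+ *     void *pvRegs;
+ *
+ *     sRegsPAddr.uiAddr = ...;     (physical base of the region to map)
+ *     pvRegs = OSMapPhysToLin(sRegsPAddr, 0x1000,
+ *                             PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     if (pvRegs != NULL)
+ *     {
+ *         ... access the mapping ...
+ *         OSUnMapPhysToLin(pvRegs, 0x1000,
+ *                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     }
+ */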
+
+/**************************************************************************/ /*!
+@Function OSCPUOperation
+@Description Perform the specified cache operation on the CPU.
+@Input eCacheOp the type of cache operation to be performed
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP eCacheOp);
+
+/**************************************************************************/ /*!
+@Function OSFlushCPUCacheRangeKM
+@Description Clean and invalidate the CPU cache for the specified
+ address range.
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ flushed
+@Input pvVirtEnd virtual end address of the range to be
+ flushed
+@Input sCPUPhysStart physical start address of the range to be
+ flushed
+@Input sCPUPhysEnd physical end address of the range to be
+ flushed
+@Return None
+ */ /**************************************************************************/
+void OSFlushCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+
+/**************************************************************************/ /*!
+@Function OSCleanCPUCacheRangeKM
+@Description Clean the CPU cache for the specified address range.
+ This writes out the contents of the cache and unsets the
+ 'dirty' bit (which indicates the physical memory is
+ consistent with the cache contents).
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ cleaned
+@Input pvVirtEnd virtual end address of the range to be
+ cleaned
+@Input sCPUPhysStart physical start address of the range to be
+ cleaned
+@Input sCPUPhysEnd physical end address of the range to be
+ cleaned
+@Return None
+ */ /**************************************************************************/
+void OSCleanCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function OSInvalidateCPUCacheRangeKM
+@Description Invalidate the CPU cache for the specified address range.
+ The cache must reload data from those addresses if they
+ are accessed.
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ invalidated
+@Input pvVirtEnd virtual end address of the range to be
+ invalidated
+@Input sCPUPhysStart physical start address of the range to be
+ invalidated
+@Input sCPUPhysEnd physical end address of the range to be
+ invalidated
+@Return None
+ */ /**************************************************************************/
+void OSInvalidateCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheOpAddressType
+@Description Returns the address type (i.e. virtual/physical/both) that is
+ used to perform cache maintenance on the CPU. This is used
+ to infer whether the virtual or physical address supplied to
+ the OSxxxCPUCacheRangeKM functions can be omitted when called.
+@Input uiCacheOp the type of cache operation to be performed
+@Return PVRSRV_CACHE_OP_ADDR_TYPE
+ */ /**************************************************************************/
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_CACHE_OP uiCacheOp);
+
+/*!
+ ******************************************************************************
+ * Cache attribute size type
+ *****************************************************************************/
+typedef enum _IMG_DCACHE_ATTRIBUTE_
+{
+ PVR_DCACHE_LINE_SIZE = 0, /*!< The cache line size */
+ PVR_DCACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */
+} IMG_DCACHE_ATTRIBUTE;
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheAttributeSize
+@Description Returns the size of a given cache attribute.
+ Typically this function is used to return the cache line
+ size, but may be extended to return the size of other
+ cache attributes.
+@Input eCacheAttribute the cache attribute whose size should
+ be returned.
+@Return The size of the specified cache attribute, in bytes.
+ */ /**************************************************************************/
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute);
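+/* Illustrative usage sketch (not part of this interface): the cache line size
+ * is typically used to align a maintenance range. uiStart and uiLength are
+ * hypothetical caller-supplied values.
+ *
+ *     IMG_UINT32 uiLine = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+ *     IMG_UINT64 uiAlignedStart = uiStart & ~((IMG_UINT64)uiLine - 1);
+ *     IMG_UINT64 uiAlignedEnd = (uiStart + uiLength + uiLine - 1) &
+ *                               ~((IMG_UINT64)uiLine - 1);
+ */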
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessID
+@Description Returns ID of current process (thread group)
+@Return ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessName
+@Description Gets the name of current process
+@Return Process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessVASpaceSize
+@Description Returns the CPU virtual address space size of current process
+@Return Process VA space size
+*/ /**************************************************************************/
+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentThreadID
+@Description Returns ID for current thread
+@Return ID of current thread
+*****************************************************************************/
+uintptr_t OSGetCurrentThreadID(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientProcessIDKM
+@Description Returns ID of current client process (thread group) which
+ has made a bridge call into the server.
+ For some operating systems, this may simply be the current
+ process id. For others, it may be that a dedicated thread
+ is used to handle the processing of bridge calls and that
+ some additional processing is required to obtain the ID of
+ the client process making the bridge call.
+@Return ID of current client process
+*****************************************************************************/
+IMG_PID OSGetCurrentClientProcessIDKM(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientProcessNameKM
+@Description Gets the name of current client process
+@Return Client process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientThreadIDKM
+@Description Returns ID for current client thread
+ For some operating systems, this may simply be the current
+ thread id. For others, it may be that a dedicated thread
+ is used to handle the processing of bridge calls and that
+ some additional processing is required to obtain the ID of
+ the client thread making the bridge call.
+@Return ID of current client thread
+*****************************************************************************/
+uintptr_t OSGetCurrentClientThreadIDKM(void);
+
+/**************************************************************************/ /*!
+@Function OSMemCmp
+@Description Compares two blocks of memory for equality.
+@Input pvBufA Pointer to the first block of memory
+@Input pvBufB Pointer to the second block of memory
+@Input uiLen The number of bytes to be compared
+@Return Value < 0 if pvBufA is less than pvBufB.
+ Value > 0 if pvBufB is less than pvBufA.
+ Value = 0 if pvBufA is equal to pvBufB.
+*****************************************************************************/
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesAlloc
+@Description Allocates a number of contiguous physical pages.
+ If allocations made by this function are CPU cached then
+ OSPhyContigPagesClean has to be implemented to write the
+ cached data to memory.
+@Input psDevNode the device for which the allocation is
+ required
+@Input uiSize the size of the required allocation (in bytes)
+@Output psMemHandle a returned handle to be used to refer to this
+ allocation
+@Output psDevPAddr the physical address of the allocation
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesFree
+@Description Frees a previous allocation of contiguous physical pages
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be freed
+@Return None.
+*****************************************************************************/
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesMap
+@Description Maps the specified allocation of contiguous physical pages
+ to a kernel virtual address
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be mapped
+@Input uiSize the size of the allocation (in bytes)
+@Input psDevPAddr the physical address of the allocation
+@Output pvPtr the virtual kernel address to which the
+ allocation is now mapped
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesUnmap
+@Description Unmaps the kernel mapping for the specified allocation of
+ contiguous physical pages
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be unmapped
+@Input pvPtr the virtual kernel address to which the
+ allocation is currently mapped
+@Return None.
+*****************************************************************************/
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesClean
+@Description Write the content of the specified allocation from the CPU cache
+ to memory, from (start + uiOffset) to (start + uiOffset + uiLength).
+ It is expected to be implemented as a cache clean operation, but
+ it is allowed to fall back to a cache clean + invalidate
+ (i.e. flush).
+ If allocations returned by OSPhyContigPagesAlloc are always
+ uncached, this can be implemented as a no-op.
+@Input psDevNode device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be flushed
+@Input uiOffset the offset in bytes from the start of the
+ allocation from where to start flushing
+@Input uiLength the amount to flush from the offset in bytes
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
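+/* Illustrative usage sketch (not part of this interface): the expected
+ * lifecycle of a contiguous physical allocation, assuming the caller already
+ * holds a valid psDevNode. Error handling is abbreviated.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvKernAddr;
+ *
+ *     if (OSPhyContigPagesAlloc(psDevNode, uiSize, &sMemHandle, &sDevPAddr) == PVRSRV_OK)
+ *     {
+ *         if (OSPhyContigPagesMap(psDevNode, &sMemHandle, uiSize, &sDevPAddr,
+ *                                 &pvKernAddr) == PVRSRV_OK)
+ *         {
+ *             / * ...write through pvKernAddr... * /
+ *             OSPhyContigPagesClean(psDevNode, &sMemHandle, 0, (IMG_UINT32)uiSize);
+ *             OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvKernAddr);
+ *         }
+ *         OSPhyContigPagesFree(psDevNode, &sMemHandle);
+ *     }
+ */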
+
+
+/**************************************************************************/ /*!
+@Function OSInitEnvData
+@Description Called to initialise any environment-specific data. This
+ could include initialising the bridge calling infrastructure
+ or device memory management infrastructure.
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function OSDeInitEnvData
+@Description The counterpart to OSInitEnvData(). Called to free any
+ resources which may have been allocated by OSInitEnvData().
+@Return None.
+ */ /**************************************************************************/
+void OSDeInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function OSVSScanf
+@Description OS function to support the standard C sscanf() function.
+ */ /**************************************************************************/
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
+
+/**************************************************************************/ /*!
+@Function OSStringNCopy
+@Description OS function to support the standard C strncpy() function.
+ */ /**************************************************************************/
+IMG_CHAR* OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function OSSNPrintf
+@Description OS function to support the standard C snprintf() function.
+ */ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function OSStringLength
+@Description OS function to support the standard C strlen() function.
+ */ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/**************************************************************************/ /*!
+@Function OSStringNLength
+@Description Return the length of a string, excluding the terminating null
+ byte ('\0'), but return at most 'uiCount' bytes. Only the first
+ 'uiCount' bytes of 'pStr' are interrogated.
+@Input pStr pointer to the string
+@Input uiCount the maximum length to return
+@Return Length of the string if less than 'uiCount' bytes, otherwise
+ 'uiCount'.
+ */ /**************************************************************************/
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
+
+/**************************************************************************/ /*!
+@Function OSStringCompare
+@Description OS function to support the standard C strcmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2);
+
+/**************************************************************************/ /*!
+@Function OSStringNCompare
+@Description OS function to support the standard C strncmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+ size_t uiSize);
+
+/**************************************************************************/ /*!
+@Function OSStringToUINT32
+@Description Converts a string to an IMG_UINT32 value.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+ IMG_UINT32 *ui32Result);
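+/* Illustrative usage sketch (not part of this interface): parsing a decimal
+ * string, with the numeric base passed explicitly (presumed to follow the
+ * usual strtoul() convention).
+ *
+ *     IMG_UINT32 ui32Value;
+ *     if (OSStringToUINT32("4096", 10, &ui32Value) != PVRSRV_OK)
+ *     {
+ *         / * ...handle the malformed string... * /
+ *     }
+ */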
+
+/*************************************************************************/ /*!
+@Function OSEventObjectCreate
+@Description Create an event object.
+@Input pszName name to assign to the new event object.
+@Output EventObject the created event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+ IMG_HANDLE *EventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectDestroy
+@Description Destroy an event object.
+@Input hEventObject the event object to destroy.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectSignal
+@Description Signal an event object. Any thread waiting on that event
+ object will be woken.
+@Input hEventObject the event object to signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWait
+@Description Wait for an event object to signal. The function is passed
+ an OS event object handle (which allows the OS to have the
+ calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after a default timeout
+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+ Note: The global bridge lock should be released while waiting
+ for the event object to signal (if held by the current thread).
+ The following logic should be implemented in the OS
+ implementation:
+ ...
+ bReleasePVRLock = (!bHoldBridgeLock &&
+ BridgeLockIsLocked() &&
+ current == BridgeLockGetOwner());
+ if (bReleasePVRLock == IMG_TRUE) OSReleaseBridgeLock();
+ ...
+ / * sleep & reschedule - wait for signal * /
+ ...
+ if (bReleasePVRLock == IMG_TRUE) OSAcquireBridgeLock();
+ ...
+
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitTimeout
+@Description Wait for an event object to signal or timeout. The function
+ is passed an OS event object handle (which allows the OS to
+ have the calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after the specified
+ timeout period (passed in 'uiTimeoutus'), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ NB. The global bridge lock should be released while waiting
+ for the event object to signal (if held by the current thread)
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Input uiTimeoutus the timeout period (in usecs)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitAndHoldBridgeLock
+@Description Wait for an event object to signal. The function is passed
+ an OS event object handle (which allows the OS to have the
+ calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after a default timeout
+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ The global bridge lock is held while waiting for the event
+ object to signal (this will prevent other bridge calls from
+ being serviced during this time).
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitTimeoutAndHoldBridgeLock
+@Description Wait for an event object to signal or timeout. The function
+ is passed an OS event object handle (which allows the OS to
+ have the calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after the specified
+ timeout period (passed in 'uiTimeoutus'), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ The global bridge lock is held while waiting for the event
+ object to signal (this will prevent other bridge calls from
+ being serviced during this time).
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Input uiTimeoutus the timeout period (in usecs)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectOpen
+@Description Open an OS handle on the specified event object.
+ This OS handle may then be used to make a thread wait for
+ that event object to signal.
+@Input hEventObject Event object handle.
+@Output phOSEvent OS handle to the returned event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+ IMG_HANDLE *phOSEvent);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectClose
+@Description Close an OS handle previously opened for an event object.
+@Input hOSEventKM OS event object handle to close.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
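+/* Illustrative usage sketch (not part of this interface): a typical event
+ * object lifecycle with one thread waiting and another signalling. Error
+ * handling is abbreviated and the handle names are hypothetical.
+ *
+ *     IMG_HANDLE hEvent, hOSEvent;
+ *
+ *     OSEventObjectCreate("ExampleEvent", &hEvent);
+ *     OSEventObjectOpen(hEvent, &hOSEvent);
+ *
+ *     / * waiting thread * /
+ *     if (OSEventObjectWaitTimeout(hOSEvent, 100000) == PVRSRV_ERROR_TIMEOUT)
+ *     {
+ *         / * nothing was signalled within 100 ms * /
+ *     }
+ *
+ *     / * signalling thread * /
+ *     OSEventObjectSignal(hEvent);
+ *
+ *     OSEventObjectClose(hOSEvent);
+ *     OSEventObjectDestroy(hEvent);
+ */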
+
+/**************************************************************************/ /*!
+@Function OSStringCopy
+@Description OS function to support the standard C strcpy() function.
+ */ /**************************************************************************/
+/* Avoid macros so we don't evaluate pszSrc twice */
+static INLINE IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+ return OSStringNCopy(pszDest, pszSrc, OSStringLength(pszSrc) + 1);
+}
+
+/*************************************************************************/ /*!
+@Function OSWaitus
+@Description Implements a busy wait of the specified number of microseconds.
+ This function does NOT release thread quanta.
+@Input ui32Timeus The duration of the wait period (in us)
+@Return None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function OSSleepms
+@Description Implements a sleep of the specified number of milliseconds.
+ This function may allow pre-emption, meaning the thread
+ may potentially not be rescheduled for a longer period.
+@Input ui32Timems The duration of the sleep (in ms)
+@Return None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function OSReleaseThreadQuanta
+@Description Relinquishes the current thread's execution time-slice,
+ permitting the OS scheduler to schedule another thread.
+@Return None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+ #define OSReadHWReg8(addr, off) (IMG_UINT8)readb((IMG_PBYTE)(addr) + (off))
+ #define OSReadHWReg16(addr, off) (IMG_UINT16)readw((IMG_PBYTE)(addr) + (off))
+ #define OSReadHWReg32(addr, off) (IMG_UINT32)readl((IMG_PBYTE)(addr) + (off))
+ /* Little endian support only */
+ #define OSReadHWReg64(addr, off) \
+ ({ \
+ __typeof__(addr) _addr = addr; \
+ __typeof__(off) _off = off; \
+ (IMG_UINT64) \
+ ( \
+ ( (IMG_UINT64)(readl((IMG_PBYTE)(_addr) + (_off) + 4)) << 32) \
+ | readl((IMG_PBYTE)(_addr) + (_off)) \
+ ); \
+ })
+
+ #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_PBYTE)(addr) + (off))
+ #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_PBYTE)(addr) + (off))
+ #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_PBYTE)(addr) + (off))
+ /* Little endian support only */
+ #define OSWriteHWReg64(addr, off, val) do \
+ { \
+ __typeof__(addr) _addr = addr; \
+ __typeof__(off) _off = off; \
+ __typeof__(val) _val = val; \
+ writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_PBYTE)(_addr) + (_off)); \
+ writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_PBYTE)(_addr) + (_off) + 4); \
+ } while (0)
+
+#elif defined(NO_HARDWARE)
+ /* FIXME: OSReadHWReg should not exist in no hardware builds */
+ #define OSReadHWReg8(addr, off) (0x4eU)
+ #define OSReadHWReg16(addr, off) (0x3a4eU)
+ #define OSReadHWReg32(addr, off) (0x30f73a4eU)
+ #define OSReadHWReg64(addr, off) (0x5b376c9d30f73a4eU)
+
+ #define OSWriteHWReg8(addr, off, val)
+ #define OSWriteHWReg16(addr, off, val)
+ #define OSWriteHWReg32(addr, off, val)
+ #define OSWriteHWReg64(addr, off, val)
+#else
+/*************************************************************************/ /*!
+@Function OSReadHWReg8
+@Description Read from an 8-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The byte read.
+*/ /**************************************************************************/
+ IMG_UINT8 OSReadHWReg8(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg16
+@Description Read from a 16-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The word read.
+*/ /**************************************************************************/
+ IMG_UINT16 OSReadHWReg16(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg32
+@Description Read from a 32-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The long word read.
+*/ /**************************************************************************/
+ IMG_UINT32 OSReadHWReg32(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg64
+@Description Read from a 64-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The long long word read.
+*/ /**************************************************************************/
+ IMG_UINT64 OSReadHWReg64(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg8
+@Description Write to an 8-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui8Value The byte to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg8(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg16
+@Description Write to a 16-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui16Value The word to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg16(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg32
+@Description Write to a 32-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui32Value The long word to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg32(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg64
+@Description Write to a 64-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui64Value The long long word to be written to the
+ register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg64(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+#endif
+
+typedef void (*PFN_TIMER_FUNC)(void*);
+/*************************************************************************/ /*!
+@Function OSAddTimer
+@Description OS specific function to install a timer callback. The
+ timer will then need to be enabled, as it is disabled by
+ default.
+ When enabled, the callback will be invoked once the specified
+ timeout has elapsed.
+@Input pfnTimerFunc Timer callback
+@Input *pvData Callback data
+@Input ui32MsTimeout Callback period
+@Return Valid handle on success, NULL on failure
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
+
+/*************************************************************************/ /*!
+@Function OSRemoveTimer
+@Description Removes the specified timer. The handle becomes invalid and
+ should no longer be used.
+@Input hTimer handle of the timer to be removed
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSEnableTimer
+@Description Enable the specified timer. After enabling, the timer will
+ invoke the associated callback at an interval determined by
+ the configured timeout period until disabled.
+@Input hTimer handle of the timer to be enabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSDisableTimer
+@Description Disable the specified timer
+@Input hTimer handle of the timer to be disabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
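+/* Illustrative usage sketch (not part of this interface): installing a
+ * periodic timer callback. The callback, its data pointer and the 500 ms
+ * period are hypothetical.
+ *
+ *     static void ExampleTimerCB(void *pvData) { / * periodic work * / }
+ *
+ *     IMG_HANDLE hTimer = OSAddTimer(ExampleTimerCB, pvMyData, 500);
+ *     if (hTimer != NULL)
+ *     {
+ *         OSEnableTimer(hTimer);
+ *         / * ...callback now fires roughly every 500 ms... * /
+ *         OSDisableTimer(hTimer);
+ *         OSRemoveTimer(hTimer);
+ *     }
+ */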
+
+
+/*************************************************************************/ /*!
+ @Function OSPanic
+ @Description Take action in response to an unrecoverable driver error
+ @Return None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function OSProcHasPrivSrvInit
+@Description Checks whether the current process has sufficient privileges
+ to initialise services
+@Return IMG_TRUE if it does, IMG_FALSE if it does not.
+*/ /**************************************************************************/
+IMG_BOOL OSProcHasPrivSrvInit(void);
+
+/*!
+ ******************************************************************************
+ * Access operation verification type
+ *****************************************************************************/
+typedef enum _img_verify_test
+{
+ PVR_VERIFY_WRITE = 0, /*!< Used with OSAccessOK() to check writing is possible */
+ PVR_VERIFY_READ /*!< Used with OSAccessOK() to check reading is possible */
+} IMG_VERIFY_TEST;
+
+/*************************************************************************/ /*!
+@Function OSAccessOK
+@Description Checks that a user space pointer is valid
+@Input eVerification the test to be verified. This can be either
+ PVR_VERIFY_WRITE or PVR_VERIFY_READ.
+@Input pvUserPtr pointer to the memory to be checked
+@Input ui32Bytes size of the memory to be checked
+@Return IMG_TRUE if the specified access is valid, IMG_FALSE if not.
+*/ /**************************************************************************/
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, void *pvUserPtr, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function OSCopyFromUser
+@Description Copy data from user-addressable memory to kernel-addressable
+ memory.
+ For operating systems that do not have a user/kernel space
+ distinction, this function should be implemented as a stub
+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination Kernel memory
+@Input pvSrc pointer to the source User memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void *pvSrc, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function OSCopyToUser
+@Description Copy data to user-addressable memory from kernel-addressable
+ memory.
+ For operating systems that do not have a user/kernel space
+ distinction, this function should be implemented as a stub
+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination User memory
+@Input pvSrc pointer to the source Kernel memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void *pvDest, const void *pvSrc, size_t ui32Bytes);
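+/* Illustrative usage sketch (not part of this interface): copying an
+ * ioctl-style parameter block in from user space and writing the result back
+ * out. EXAMPLE_PARAMS and pvUserParams are hypothetical, and passing NULL for
+ * pvProcess assumes the OS implementation does not use that handle.
+ *
+ *     EXAMPLE_PARAMS sParams;
+ *
+ *     if (OSCopyFromUser(NULL, &sParams, pvUserParams, sizeof(sParams)) != PVRSRV_OK)
+ *         return PVRSRV_ERROR_INVALID_PARAMS;
+ *
+ *     / * ...process sParams... * /
+ *
+ *     return OSCopyToUser(NULL, pvUserParams, &sParams, sizeof(sParams));
+ */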
+
+#if defined (__linux__) || defined (WINDOWS_WDF) || defined(INTEGRITY_OS)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+/*************************************************************************/ /*!
+@Function OSBridgeCopyFromUser
+@Description Copy data from user-addressable memory into kernel-addressable
+ memory as part of a bridge call operation.
+ For operating systems that do not have a user/kernel space
+ distinction, this function will require whatever implementation
+ is needed to pass data for making the bridge function call.
+ For operating systems which do have a user/kernel space
+ distinction (such as Linux) this function may be defined so
+ as to equate to a call to OSCopyFromUser().
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination Kernel memory
+@Input pvSrc pointer to the source User memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function OSBridgeCopyToUser
+@Description Copy data to user-addressable memory from kernel-addressable
+ memory as part of a bridge call operation.
+ For operating systems that do not have a user/kernel space
+ distinction, this function will require whatever implementation
+ is needed to pass data for making the bridge function call.
+ For operating systems which do have a user/kernel space
+ distinction (such as Linux) this function may be defined so
+ as to equate to a call to OSCopyToUser().
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination User memory
+@Input pvSrc pointer to the source Kernel memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */
+
+/*************************************************************************/ /*!
+@Function OSGetGlobalBridgeBuffers
+@Description Returns the addresses and sizes of the buffers used to pass
+ data into and out of bridge function calls.
+@Output ppvBridgeInBuffer pointer to the input bridge data buffer
+ of size PVRSRV_MAX_BRIDGE_IN_SIZE.
+@Output ppvBridgeOutBuffer pointer to the output bridge data buffer
+ of size PVRSRV_MAX_BRIDGE_OUT_SIZE.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSGetGlobalBridgeBuffers (void **ppvBridgeInBuffer,
+ void **ppvBridgeOutBuffer);
+
+/*************************************************************************/ /*!
+@Function OSSetDriverSuspended
+@Description Prevent processes from using the driver while it is
+ suspended. This function is not required for most operating
+ systems.
+@Return IMG_TRUE on success, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSSetDriverSuspended(void);
+
+/*************************************************************************/ /*!
+@Function OSClearDriverSuspended
+@Description Re-allows processes to use the driver when it is no longer
+ suspended. This function is not required for most operating
+ systems.
+@Return IMG_TRUE on success, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSClearDriverSuspended(void);
+
+/*************************************************************************/ /*!
+@Function OSGetDriverSuspended
+@Description Returns whether the driver is currently suspended and therefore
+ unavailable to processes. This function is not required
+ for most operating systems.
+@Return IMG_TRUE if the driver is suspended (use is not possible),
+ IMG_FALSE if the driver is not suspended (use is possible).
+*/ /**************************************************************************/
+IMG_BOOL OSGetDriverSuspended(void);
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function OSWriteMemoryBarrier
+@Description Insert a write memory barrier.
+ The write memory barrier guarantees that all store operations
+ (writes) specified before the barrier will appear to happen
+ before all of the store operations specified after the barrier.
+@Return None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(void);
+#define OSReadMemoryBarrier() OSMemoryBarrier()
+/*************************************************************************/ /*!
+@Function OSMemoryBarrier
+@Description Insert a read/write memory barrier.
+ The read and write memory barrier guarantees that all load
+ (read) and all store (write) operations specified before the
+ barrier will appear to happen before all of the load/store
+ operations specified after the barrier.
+@Return None.
+*/ /**************************************************************************/
+void OSMemoryBarrier(void);
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVToNativeError
+@Description Returns the OS-specific equivalent error number/code for
+ the specified PVRSRV_ERROR value.
+ If there is no equivalent, or the PVRSRV_ERROR value is
+ PVRSRV_OK (no error), 0 is returned.
+@Return The OS equivalent error code.
+*/ /**************************************************************************/
+int PVRSRVToNativeError(PVRSRV_ERROR e);
+#define OSPVRSRVToNativeError(e) ((PVRSRV_OK == (e)) ? 0 : PVRSRVToNativeError(e))
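+/* Illustrative usage sketch (not part of this interface): translating a
+ * services error on a system-call return path. Returning a negative errno is
+ * a Linux convention assumed here, and DoSomething() is a placeholder.
+ *
+ *     PVRSRV_ERROR eError = DoSomething();
+ *     return (eError != PVRSRV_OK) ? -OSPVRSRVToNativeError(eError) : 0;
+ */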
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+typedef struct rw_semaphore *POSWR_LOCK;
+
+#define OSWRLockCreate(ppsLock) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+ if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+struct _OSWR_LOCK_ {
+ IMG_UINT32 ui32Dummy;
+};
+#if defined(WINDOWS_WDF)
+ typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+#endif
+
+/*************************************************************************/ /*!
+@Function OSWRLockCreate
+@Description Create a writer/reader lock.
+ This type of lock allows multiple concurrent readers but
+ only a single writer, allowing for optimized performance.
+@Output ppsLock A handle to the created WR lock.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+ PVR_UNREFERENCED_PARAMETER(ppsLock);
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockDestroy
+@Description Destroys a writer/reader lock.
+@Input psLock The handle of the WR lock to be destroyed.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockAcquireRead
+@Description Acquire a writer/reader read lock.
+ If the write lock is already acquired, the caller will
+ block until it is released.
+@Input psLock The handle of the WR lock to be acquired for
+ reading.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockReleaseRead
+@Description Release a writer/reader read lock.
+@Input psLock The handle of the WR lock whose read lock is to
+ be released.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockAcquireWrite
+@Description Acquire a writer/reader write lock.
+ If the write lock or any read lock are already acquired,
+ the caller will block until all are released.
+@Input psLock The handle of the WR lock to be acquired for
+ writing.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockReleaseWrite
+@Description Release a writer/reader write lock.
+@Input psLock The handle of the WR lock whose write lock is to
+ be released.
+@Return None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
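+/* Illustrative usage sketch (not part of this interface): protecting data
+ * that is read often but written rarely with a writer/reader lock. Error
+ * handling is abbreviated and the protected list is hypothetical.
+ *
+ *     POSWR_LOCK hListLock;
+ *
+ *     OSWRLockCreate(&hListLock);
+ *
+ *     OSWRLockAcquireRead(hListLock);      / * many readers may hold this * /
+ *     / * ...walk the list... * /
+ *     OSWRLockReleaseRead(hListLock);
+ *
+ *     OSWRLockAcquireWrite(hListLock);     / * exclusive writer * /
+ *     / * ...modify the list... * /
+ *     OSWRLockReleaseWrite(hListLock);
+ *
+ *     OSWRLockDestroy(hListLock);
+ */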
+
+/*************************************************************************/ /*!
+@Function OSDivide64r64
+@Description Divide a 64-bit value by a 32-bit value. Return the 64-bit
+ quotient.
+ The remainder is also returned in 'pui32Remainder'.
+@Input ui64Divident The number to be divided.
+@Input ui32Divisor The 32-bit value 'ui64Divident' is to
+ be divided by.
+@Output pui32Remainder The remainder of the division.
+@Return The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function OSDivide64
+@Description Divide a 64-bit value by a 32-bit value. Return a 32-bit
+ quotient.
+ The remainder is also returned in 'pui32Remainder'.
+ This function allows for a more optimal implementation
+ of a 64-bit division when the result is known to be
+ representable in 32-bits.
+@Input ui64Divident The number to be divided.
+@Input ui32Divisor The 32-bit value 'ui64Divident' is to
+ be divided by.
+@Output pui32Remainder The remainder of the division.
+@Return The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
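+/* Illustrative usage sketch (not part of this interface): averaging a byte
+ * count over a number of samples. ui64TotalBytes and ui32Samples are
+ * hypothetical, and the quotient is assumed to fit in 32 bits, which is the
+ * precondition for preferring OSDivide64 over OSDivide64r64.
+ *
+ *     IMG_UINT32 ui32Remainder;
+ *     IMG_UINT32 ui32Average = OSDivide64(ui64TotalBytes, ui32Samples,
+ *                                         &ui32Remainder);
+ */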
+
+/*************************************************************************/ /*!
+@Function OSDumpStack
+@Description Dump the current task information and its stack trace.
+@Return None
+*/ /**************************************************************************/
+void OSDumpStack(void);
+
+/*************************************************************************/ /*!
+@Function OSAcquireBridgeLock
+@Description Acquire the global bridge lock.
+ This prevents another bridge call from being actioned while
+ we are still servicing the current bridge call.
+ NB. This function must not return until the lock is acquired
+ (meaning the implementation should not timeout or return with
+ an error, as the caller will assume they have the lock).
+ This function has an OS-specific implementation rather than
+ an abstracted implementation for efficiency reasons, as it
+ is called frequently.
+@Return None
+*/ /**************************************************************************/
+void OSAcquireBridgeLock(void);
+/*************************************************************************/ /*!
+@Function OSReleaseBridgeLock
+@Description Release the global bridge lock.
+ This function has an OS-specific implementation rather than
+ an abstracted implementation for efficiency reasons, as it
+ is called frequently.
+@Return None
+*/ /**************************************************************************/
+void OSReleaseBridgeLock(void);
+
+/*
+ * Functions for providing support for PID statistics.
+ */
+typedef void (OS_STATS_PRINTF_FUNC)(void *pvFilePtr, const IMG_CHAR *pszFormat, ...);
+
+typedef void (OS_STATS_PRINT_FUNC)(void *pvFilePtr,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+typedef IMG_UINT32 (OS_INC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (OS_DEC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticEntry
+@Description Create a statistic entry in the specified folder.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszName String containing the name for the entry.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Input pfnIncMemRefCt Pointer to function that can be used to take a
+ reference on the memory backing the statistic
+ entry.
+@Input pfnDecMemRefCt Pointer to function that can be used to drop a
+ reference on the memory backing the statistic
+ entry.
+@Input pvData OS specific reference that can be used by
+ the statistic callback functions.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint,
+ OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+ OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+ void *pvData);
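+/* Illustrative sketch (not part of this interface): a possible print callback
+ * matching OS_STATS_PRINT_FUNC. It is assumed here that pvStatPtr receives
+ * the pvData reference registered with OSCreateStatisticEntry(); the
+ * EXAMPLE_STATS structure is hypothetical.
+ *
+ *     static void ExampleStatsPrint(void *pvFilePtr, void *pvStatPtr,
+ *                                   OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+ *     {
+ *         EXAMPLE_STATS *psStats = pvStatPtr;
+ *         pfnOSStatsPrintf(pvFilePtr, "Allocations: %u\n",
+ *                          psStats->ui32AllocCount);
+ *     }
+ */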
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticEntry
+@Description Removes a statistic entry.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+/*************************************************************************/ /*!
+@Function OSCreateRawStatisticEntry
+@Description Create a raw statistic entry in the specified folder.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszFileName String containing the name for the entry.
+@Input pvParentDir Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveRawStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint);
+
+/*************************************************************************/ /*!
+@Function OSRemoveRawStatisticEntry
+@Description Removes a raw statistic entry.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateRawStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveRawStatisticEntry(void *pvEntry);
+#endif
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticFolder
+@Description Create a statistic folder to hold statistic entries.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszName String containing the name for the folder.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the folder
+ to create the folder in, or NULL for the root.
+@Return Pointer void reference to the folder created, which can be
+ passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder);
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticFolder
+@Description Removes a statistic folder.
+ Where operating systems do not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input ppvFolder Reference from OSCreateStatisticFolder() of the
+ folder that should be removed.
+ This needs to be a double pointer because it has to
+ be NULLed right after memory is freed to avoid
+ possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder);
+
+/*************************************************************************/ /*!
+@Function OSUserModeAccessToPerfCountersEn
+@Description Permit User-mode access to CPU performance counter
+ registers.
+ This function is called during device initialisation.
+ Certain CPU architectures may need to explicitly permit
+ User mode access to performance counters - if this is
+ required, the necessary code should be implemented inside
+ this function.
+@Return None.
+*/ /**************************************************************************/
+void OSUserModeAccessToPerfCountersEn(void);
+
+/*************************************************************************/ /*!
+@Function OSDebugSignalPID
+@Description Sends a SIGTRAP signal to a specific PID in user mode for
+ debugging purposes. The user mode process can register a handler
+ against this signal.
+ This is necessary to support the Rogue debugger. If the Rogue
+ debugger is not used then this function may be implemented as
+ a stub.
+@Input ui32PID The PID for the signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
+
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#include <asm/io.h>
+
+#if defined(page_to_phys)
+#undef page_to_phys
+#define page_to_phys(x) l4x_virt_to_phys(x)
+#else
+#error "Unable to override page_to_phys() implementation"
+#endif
+#endif
+
+#endif /* __OSFUNC_H__ */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title arm specific OS functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS functions whose implementations are processor specific
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+ #include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+
+#if defined(CONFIG_OUTER_CACHE)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
+
+ /* Since 3.16 the outer_xxx() functions require IRQs to be disabled and
+ * that no other cache masters operate on the outer cache. */
+ static DEFINE_SPINLOCK(gsCacheFlushLock);
+
+ #define OUTER_CLEAN_RANGE() { \
+ unsigned long uiLockFlags; \
+ \
+ spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+ outer_clean_range(0, ULONG_MAX); \
+ spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+ }
+
+ #define OUTER_FLUSH_ALL() { \
+ unsigned long uiLockFlags; \
+ \
+ spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+ outer_flush_all(); \
+ spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+ }
+
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+ /* No need to disable IRQs for older kernels */
+ #define OUTER_CLEAN_RANGE() outer_clean_range(0, ULONG_MAX)
+ #define OUTER_FLUSH_ALL() outer_flush_all()
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+#else /* CONFIG_OUTER_CACHE */
+
+ /* Don't do anything if we have no outer cache */
+ #define OUTER_CLEAN_RANGE()
+ #define OUTER_FLUSH_ALL()
+#endif /* CONFIG_OUTER_CACHE */
+
+static void per_cpu_cache_flush(void *arg)
+{
+ PVR_UNREFERENCED_PARAMETER(arg);
+ flush_cache_all();
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch(uiCacheOp)
+ {
+ /* Fall-through */
+ case PVRSRV_CACHE_OP_CLEAN:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+ OUTER_CLEAN_RANGE();
+ break;
+
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ case PVRSRV_CACHE_OP_FLUSH:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+ OUTER_FLUSH_ALL();
+ break;
+
+ case PVRSRV_CACHE_OP_NONE:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Global cache operation type %d is invalid",
+ __FUNCTION__, uiCacheOp));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return eError;
+}
+
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+ return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+
+void OSFlushCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+ arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+ arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+ /* Inner cache */
+ dmac_flush_range(pvVirtStart, pvVirtEnd);
+
+ /* Outer cache */
+ outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCleanCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+ arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+ /* Inner cache */
+ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE);
+
+ /* Outer cache */
+ outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSInvalidateCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+ arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+#if defined(PVR_LINUX_DONT_USE_RANGE_BASED_INVALIDATE)
+ OSCleanCPUCacheRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+#else
+ /* Inner cache */
+ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE);
+
+ /* Outer cache */
+ outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+ return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+#else
+ return PVRSRV_CACHE_OP_ADDR_TYPE_BOTH;
+#endif
+}
+
+/* User Enable Register */
+#define PMUSERENR_EN 0x00000001 /* enable user access to the counters */
+
+static void per_cpu_perf_counter_user_access_en(void *data)
+{
+ PVR_UNREFERENCED_PARAMETER(data);
+#if !defined(CONFIG_L4)
+ /* Enable user-mode access to counters. */
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN));
+#endif
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+ on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1);
+}
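As an illustration of what the PMUSERENR write above enables (and not part of the driver): once user access is enabled, a user-mode process on ARMv7 can read the cycle counter directly, assuming the counter itself has already been started via PMCR/PMCNTENSET by a profiling tool.

/* Illustrative user-mode snippet (ARMv7), not driver code. */
static inline unsigned int ReadCycleCounter(void)
{
	unsigned int ui32Cycles;

	/* MRC p15, 0, <Rt>, c9, c13, 0 reads PMCCNTR, the cycle counter. */
	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ui32Cycles));
	return ui32Cycles;
}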
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title arm specific OS functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS functions whose implementation is processor specific
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+ /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+ * to add the necessary code to manage that cache. See osfunc_arm.c
+ * for an example of how to do so.
+ */
+ #error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+ static IMG_BOOL bLog = IMG_TRUE;
+ /*
+ NOTE: arm64 global flush support on Linux >= v4.2:
+ - Global cache flush support is deprecated from v4.2 onwards.
+ - Cache maintenance is therefore done using UM/KM VA range
+ maintenance _only_.
+ - If too much time is spent in VA range maintenance, implement an
+ arm64 assembly sequence for the global flush here
+ (asm volatile (...)).
+ - If you do not want to implement the global flush assembly,
+ disable KM cache maintenance support in UM cache.c and remove
+ this PVR_LOG message.
+ */
+ if (bLog)
+ {
+ PVR_LOG(("Global d-cache flush assembly not implemented, using range-based flush"));
+ bLog = IMG_FALSE;
+ }
+#else
+ flush_cache_all();
+#endif
+ PVR_UNREFERENCED_PARAMETER(arg);
+}
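For reference, the range-based maintenance the note above relies on amounts to a clean+invalidate by VA, one cache line at a time. The helper below is an illustrative sketch, not driver code: the function name is hypothetical and the line size is derived from CTR_EL0.DminLine.

/* Illustrative sketch of an arm64 clean+invalidate-by-VA loop. */
static void example_flush_dcache_range(void *pvStart, void *pvEnd)
{
	unsigned long ulCtr, ulLineSize, ulAddr;

	/* CTR_EL0[19:16] (DminLine) is log2 of the smallest D-cache line in words. */
	asm volatile("mrs %0, ctr_el0" : "=r" (ulCtr));
	ulLineSize = 4UL << ((ulCtr >> 16) & 0xF);

	for (ulAddr = (unsigned long)pvStart & ~(ulLineSize - 1);
	     ulAddr < (unsigned long)pvEnd;
	     ulAddr += ulLineSize)
	{
		asm volatile("dc civac, %0" :: "r" (ulAddr) : "memory");
	}
	asm volatile("dsb sy" ::: "memory");
}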
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch(uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ case PVRSRV_CACHE_OP_FLUSH:
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+ break;
+
+ case PVRSRV_CACHE_OP_NONE:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Global cache operation type %d is invalid",
+ __FUNCTION__, uiCacheOp));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return eError;
+}
+
+void OSFlushCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+
+ PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+ PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+ dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+ dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+void OSCleanCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+
+ PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+ PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+ dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+}
+
+void OSInvalidateCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+
+ PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+ PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+ dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+ return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+ /* FIXME: implement similarly to __arm__ */
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title x86 specific OS functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS functions whose implementation is processor specific
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/smp.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+static void per_cpu_cache_flush(void *arg)
+{
+ PVR_UNREFERENCED_PARAMETER(arg);
+ wbinvd();
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch(uiCacheOp)
+ {
+ /* Fall-through */
+ case PVRSRV_CACHE_OP_CLEAN:
+ case PVRSRV_CACHE_OP_FLUSH:
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+ break;
+
+ case PVRSRV_CACHE_OP_NONE:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Global cache operation type %d is invalid",
+ __FUNCTION__, uiCacheOp));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return eError;
+}
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+ IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+ IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+ IMG_BYTE *pbBase;
+
+ pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd,
+ (uintptr_t)boot_cpu_data.x86_clflush_size);
+
+ mb();
+ for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+ {
+ clflush(pbBase);
+ }
+ mb();
+}
+
+void OSFlushCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+
+void OSCleanCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ /* No clean feature on x86 */
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSInvalidateCPUCacheRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ /* No invalidate-only support */
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+ return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+ /* Not applicable to x86 architecture. */
+}
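A caller can use OSCPUCacheOpAddressType() to avoid looking up CPU physical addresses on platforms (such as x86 above) whose range functions only need virtual addresses. The sketch below is illustrative; LookupCpuPhysAddr() is a hypothetical helper standing in for a PMR or page-table query, and the types are assumed to come from osfunc.h as in the file above.

/* Hypothetical helper, not part of this driver. */
IMG_CPU_PHYADDR LookupCpuPhysAddr(void *pvCpuVirtAddr);

static void ExampleFlushBuffer(PVRSRV_DEVICE_NODE *psDevNode,
                               void *pvCpuVirtAddr, size_t uiSize)
{
	IMG_CPU_PHYADDR sPhysStart = { 0 };
	IMG_CPU_PHYADDR sPhysEnd = { 0 };

	if (OSCPUCacheOpAddressType(PVRSRV_CACHE_OP_FLUSH) !=
	    PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
	{
		/* Physical addresses are only needed on arm/arm64-style ports. */
		sPhysStart = LookupCpuPhysAddr(pvCpuVirtAddr);
		sPhysEnd.uiAddr = sPhysStart.uiAddr + uiSize;
	}

	OSFlushCPUCacheRangeKM(psDevNode, pvCpuVirtAddr,
	                       (IMG_BYTE *)pvCpuVirtAddr + uiSize,
	                       sPhysStart, sPhysEnd);
}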
--- /dev/null
+/*************************************************************************/ /*!
+@File oskm_apphint.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS-independent interface for retrieving KM apphints
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(LINUX)
+#include "km_apphint.h"
+#else
+#include "services_client_porting.h"
+#endif
+#if !defined(__OSKM_APPHINT_H__)
+#define __OSKM_APPHINT_H__
+
+
+#if defined(LINUX) && !defined(DOXYGEN)
+#if defined(SUPPORT_KERNEL_SRVINIT)
+static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+ return !pvr_apphint_get_uint32(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+ return !pvr_apphint_get_uint64(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+ return !pvr_apphint_get_bool(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+ return !pvr_apphint_get_string(id, buffer, size);
+}
+
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+ os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+ os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+ os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+ os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+#else
+static INLINE IMG_UINT os_get_apphint_default_UINT32(IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+ *pVal = *pAppHintDefault;
+ return IMG_TRUE;
+}
+static INLINE IMG_UINT os_get_apphint_default_UINT64(IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+ *pVal = *pAppHintDefault;
+ return IMG_TRUE;
+}
+static INLINE IMG_UINT os_get_apphint_default_BOOL(IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+ *pVal = *pAppHintDefault;
+ return IMG_TRUE;
+}
+static INLINE IMG_UINT os_get_apphint_default_STRING(IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, IMG_UINT32 size) {
+ strlcpy(buffer, *pAppHintDefault, size);
+ return IMG_TRUE;
+}
+
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+ os_get_apphint_default_UINT32(appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+ os_get_apphint_default_UINT64(appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+ os_get_apphint_default_BOOL(appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+ os_get_apphint_default_STRING(appHintDefault, buffer, size)
+
+#endif
+
+#define OSCreateKMAppHintState(state) \
+ PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+ PVR_UNREFERENCED_PARAMETER(state)
+
+#else /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+static INLINE IMG_BOOL os_get_km_apphint_STRING(void *state, IMG_CHAR *name, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+ PVR_UNREFERENCED_PARAMETER(size);
+ return PVRSRVGetAppHint(state, name, IMG_STRING_TYPE, pAppHintDefault, buffer);
+}
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value)
+@Description Interface for retrieval of uint32 km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT64(state, name, appHintDefault, value)
+@Description Interface for retrieval of uint64 km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintBOOL(state, name, appHintDefault, value)
+@Description Interface for retrieval of IMG_BOOL km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size)
+@Description Interface for retrieval of string km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output buffer Buffer used to return app hint string.
+@Input size Size of the buffer.
+ */ /**************************************************************************/
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+ os_get_km_apphint_STRING(state, # name, appHintDefault, buffer, size)
+
+/**************************************************************************/ /*!
+@def OSCreateKMAppHintState(state)
+@Description Creates the app hint state.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVCreateAppHintState() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Output state App hint state
+ */ /**************************************************************************/
+#define OSCreateKMAppHintState(state) \
+ PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
+
+/**************************************************************************/ /*!
+@def OSFreeKMAppHintState(state)
+@Description Frees the app hint state.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVFreeAppHintState() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+ PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* #if defined(LINUX) */
+
+#endif /* __OSKM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
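A typical caller pairs the create/get/free macros as sketched below. The sketch is illustrative: EnableSignatureChecks and the default value are examples (any registered apphint name can be substituted), and the fall-back on a false return is a caller-side convention, not mandated by this header.

static IMG_UINT32 ExampleReadAppHint(void)
{
	void *pvAppHintState = NULL;
	IMG_UINT32 ui32Default = 1;
	IMG_UINT32 ui32Value = 1;

	OSCreateKMAppHintState(&pvAppHintState);
	if (!OSGetKMAppHintUINT32(pvAppHintState, EnableSignatureChecks,
	                          &ui32Default, &ui32Value))
	{
		/* No hint retrieved; fall back to the supplied default. */
		ui32Value = ui32Default;
	}
	OSFreeKMAppHintState(pvAppHintState);

	return ui32Value;
}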
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title OS Interface for mapping PMRs into CPU space.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS abstraction for the mmap2 interface for mapping PMRs into
+ User Mode memory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OSMMAP_H_
+#define _OSMMAP_H_
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function OSMMapPMR
+@Description Maps the specified PMR into CPU memory so that it may be
+ accessed by the user process.
+ Whether the memory is mapped read only, read/write, or not at
+ all, is dependent on the PMR itself.
+ The PMR handle is opaque to the user, and lower levels of the
+ stack ensure that the handle is private to this process, so
+ this API cannot be abused to gain access to another process's
+ PMRs.
+ The OS implementation of this function should return the virtual
+ address and length for the caller to use. The "PrivData" is to be
+ stored opaquely by the caller (who should make no assumptions
+ about it; in particular, NULL is a valid handle) and passed back
+ to OSMUnmapPMR.
+ The OS implementation is free to use the PrivData handle for any
+ purpose it sees fit.
+@Input hBridge The bridge handle.
+@Input hPMR The handle of the PMR to be mapped.
+@Input uiPMRLength The size of the PMR.
+@Input uiFlags Flags indicating how the mapping should
+ be done (read-only, etc). These may not
+ be honoured if the PMR does not permit
+ them.
+@Output phOSMMapPrivDataOut Returned private data.
+@Output ppvMappingAddressOut The returned mapping.
+@Output puiMappingLengthOut The size of the returned mapping.
+@Return PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+extern PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiPMRLength,
+ IMG_UINT32 uiFlags,
+ IMG_HANDLE *phOSMMapPrivDataOut,
+ void **ppvMappingAddressOut,
+ size_t *puiMappingLengthOut);
+
+/**************************************************************************/ /*!
+@Function OSMUnmapPMR
+@Description Unmaps the specified PMR from CPU memory.
+ This function is the counterpart to OSMMapPMR.
+ The caller is required to pass the PMR handle back in along
+ with the same 3-tuple of information that was returned by the
+ call to OSMMapPMR in phOSMMapPrivDataOut.
+ It is possible to unmap only part of the original mapping
+ with this call, by specifying only the address range to be
+ unmapped in pvMappingAddress and uiMappingLength.
+@Input hBridge The bridge handle.
+@Input hPMR The handle of the PMR to be unmapped.
+@Input hOSMMapPrivData The OS private data of the mapping.
+@Input pvMappingAddress The address to be unmapped.
+@Input uiMappingLength The size to be unmapped.
+@Return PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+/*
+ FIXME:
+ perhaps this function should take _only_ the hOSMMapPrivData arg,
+ and the implementation is required to store any of the other data
+ items that it requires to do the unmap?
+*/
+extern void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE hOSMMapPrivData,
+ void *pvMappingAddress,
+ size_t uiMappingLength);
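The expected map/use/unmap pairing can be sketched as follows. This is illustrative only: the PMR handle is assumed to have been obtained elsewhere (for example from a device memory allocation), and a uiFlags value of 0 is an example.

static PVRSRV_ERROR ExampleAccessPMR(IMG_HANDLE hBridge,
                                     IMG_HANDLE hPMR,
                                     IMG_DEVMEM_SIZE_T uiPMRLength)
{
	IMG_HANDLE hPrivData;
	void *pvCpuVirtAddr;
	size_t uiMappedSize;
	PVRSRV_ERROR eError;

	eError = OSMMapPMR(hBridge, hPMR, uiPMRLength, 0,
	                   &hPrivData, &pvCpuVirtAddr, &uiMappedSize);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... read/write the mapping at pvCpuVirtAddr ... */

	OSMUnmapPMR(hBridge, hPMR, hPrivData, pvCpuVirtAddr, uiMappedSize);
	return PVRSRV_OK;
}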
+
+
+#endif /* _OSMMAP_H_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS abstraction for the mmap2 interface for mapping PMRs into
+ User Mode memory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "osmmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+#include "pmr.h"
+
+IMG_INTERNAL PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiPMRSize,
+ IMG_UINT32 uiFlags,
+ IMG_HANDLE *phOSMMapPrivDataOut,
+ void **ppvMappingAddressOut,
+ size_t *puiMappingLengthOut)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMR;
+ void *pvKernelAddress;
+ size_t uiLength;
+ IMG_HANDLE hPriv;
+
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+ /*
+ Normally this function would mmap a PMR into the memory space of
+ a user process, but in this case we're taking a PMR and mapping it
+ into kernel virtual space. We keep the same function name for
+ symmetry, as this allows the higher layers of the software stack
+ to not care whether they are running in user mode or kernel mode.
+ */
+
+ psPMR = hPMR;
+
+ eError = PMRAcquireKernelMappingData(psPMR,
+ 0,
+ 0,
+ &pvKernelAddress,
+ &uiLength,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *phOSMMapPrivDataOut = hPriv;
+ *ppvMappingAddressOut = pvKernelAddress;
+ *puiMappingLengthOut = uiLength;
+
+ PVR_ASSERT(*puiMappingLengthOut == uiPMRSize);
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE hOSMMapPrivData,
+ void *pvMappingAddress,
+ size_t uiMappingLength)
+{
+ PMR *psPMR;
+
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+ PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+ psPMR = hPMR;
+ PMRReleaseKernelMappingData(psPMR,
+ hOSMMapPrivData);
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Parameter dump macro target routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined (PDUMP)
+
+#include <asm/atomic.h>
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+
+#include "dbgdrvif_srv5.h"
+#include "allocmem.h"
+#include "pdump_km.h"
+#include "pdump_osfunc.h"
+#include "services_km.h"
+
+#include <linux/kernel.h> // sprintf
+#include <linux/string.h> // strncpy, strlen
+#include <linux/mutex.h>
+
+#define PDUMP_DATAMASTER_PIXEL (1)
+#define PDUMP_DATAMASTER_EDM (3)
+
+static PDBGKM_SERVICE_TABLE gpfnDbgDrv = NULL;
+
+
+typedef struct PDBG_PDUMP_STATE_TAG
+{
+ PDBG_STREAM psStream[PDUMP_NUM_CHANNELS];
+
+ IMG_CHAR *pszMsg;
+ IMG_CHAR *pszScript;
+ IMG_CHAR *pszFile;
+
+} PDBG_PDUMP_STATE;
+
+static PDBG_PDUMP_STATE gsDBGPdumpState = {{NULL}, NULL, NULL, NULL};
+
+#define SZ_MSG_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_SCRIPT_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+
+static struct mutex gsPDumpMutex;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+
+/*!
+ * \name PDumpOSGetScriptString
+ */
+PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
+ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
+ if (!*phScript)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSGetMessageString
+ */
+PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszMsg = gsDBGPdumpState.pszMsg;
+ *pui32MaxLen = SZ_MSG_SIZE_MAX;
+ if (!*ppszMsg)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSGetFilenameString
+ */
+PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszFile = gsDBGPdumpState.pszFile;
+ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
+ if (!*ppszFile)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSBufprintf
+ */
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+ IMG_CHAR* pszBuf = hBuf;
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ g_ui32EveryLineCounter++;
+#endif
+
+ /* Put line ending sequence at the end if it isn't already there */
+ PDumpOSVerifyLineEnding(pszBuf, ui32ScriptSizeMax);
+
+ return PVRSRV_OK;
+}
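A typical use of the two functions above is to fetch the shared script buffer and format a line into it. The sketch below is illustrative (the comment text is an example); callers in the driver additionally hold the PDump lock (PDumpOSLock) around such sequences.

static PVRSRV_ERROR ExampleWriteScriptLine(void)
{
	IMG_HANDLE hScript;
	IMG_UINT32 ui32MaxLen;
	PVRSRV_ERROR eError;

	eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Format a PDump comment line into the script buffer. */
	return PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s", "example comment");
}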
+
+/*!
+ * \name PDumpOSVSprintf
+ */
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
+{
+ IMG_INT32 n;
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSDebugPrintf
+ */
+void PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
+{
+ PVR_UNREFERENCED_PARAMETER(pszFormat);
+
+ /* FIXME: Implement using services PVR_DBG or otherwise with kprintf */
+}
+
+/*!
+ * \name PDumpOSSprintf
+ */
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
+{
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSBuflen
+ */
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_CHAR* pszBuf = hBuffer;
+ IMG_UINT32 ui32Count = 0;
+
+ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
+ {
+ ui32Count++;
+ }
+ return(ui32Count);
+}
+
+/*!
+ * \name PDumpOSVerifyLineEnding
+ */
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_UINT32 ui32Count;
+ IMG_CHAR* pszBuf = hBuffer;
+
+ /* strlen */
+ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
+
+ /* Put \n sequence at the end if it isn't already there */
+ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
+ {
+ pszBuf[ui32Count] = '\n';
+ ui32Count++;
+ pszBuf[ui32Count] = '\0';
+ }
+}
+
+
+
+/*!
+ * \name PDumpOSSetSplitMarker
+ */
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker)
+{
+ PDBG_STREAM psStream = (PDBG_STREAM) hStream;
+
+ PVR_ASSERT(gpfnDbgDrv);
+ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
+ return IMG_TRUE;
+}
+
+/*!
+ * \name PDumpOSDebugDriverWrite
+ */
+IMG_UINT32 PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount)
+{
+ PVR_ASSERT(gpfnDbgDrv != NULL);
+
+ return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount);
+}
+
+/*!
+ * \name PDumpOSReleaseExecution
+ */
+void PDumpOSReleaseExecution(void)
+{
+ OSReleaseThreadQuanta();
+}
+
+/**************************************************************************
+ * Function Name : PDumpOSInit
+ * Outputs : psParam, psScript, pui32InitCapMode, ppszEnvComment
+ * Returns : PVRSRV_ERROR
+ * Description : Resets the connection to vldbgdrv, then tries to
+ * connect to the PDUMP streams
+**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+ IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment)
+{
+ PVRSRV_ERROR eError;
+
+ *pui32InitCapMode = DEBUG_CAPMODE_FRAMED;
+ *ppszEnvComment = NULL;
+
+ /* We may have connected to the debug driver on an earlier attempt,
+ * but stream creation would have failed if pdump.exe was running
+ * at the time.
+ */
+ if (!gpfnDbgDrv)
+ {
+ DBGDrvGetServiceTable((void **)&gpfnDbgDrv);
+
+ // If something failed then no point in trying to connect streams
+ if (gpfnDbgDrv == NULL)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ if(!gsDBGPdumpState.pszFile)
+ {
+ gsDBGPdumpState.pszFile = OSAllocMem(SZ_FILENAME_SIZE_MAX);
+ if (gsDBGPdumpState.pszFile == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszMsg)
+ {
+ gsDBGPdumpState.pszMsg = OSAllocMem(SZ_MSG_SIZE_MAX);
+ if (gsDBGPdumpState.pszMsg == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszScript)
+ {
+ gsDBGPdumpState.pszScript = OSAllocMem(SZ_SCRIPT_SIZE_MAX);
+ if (gsDBGPdumpState.pszScript == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ if (!gpfnDbgDrv->pfnCreateStream(PDUMP_PARAM_CHANNEL_NAME, 0, 10, &psParam->hInit, &psParam->hMain, &psParam->hDeinit))
+ {
+ goto init_failed;
+ }
+ gsDBGPdumpState.psStream[PDUMP_CHANNEL_PARAM] = psParam->hMain;
+
+
+ if (!gpfnDbgDrv->pfnCreateStream(PDUMP_SCRIPT_CHANNEL_NAME, 0, 10, &psScript->hInit, &psScript->hMain, &psScript->hDeinit))
+ {
+ goto init_failed;
+ }
+ gsDBGPdumpState.psStream[PDUMP_CHANNEL_SCRIPT] = psScript->hMain;
+ }
+
+ return PVRSRV_OK;
+
+init_failed:
+ PDumpOSDeInit(psParam, psScript);
+ return eError;
+}
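The expected pairing of PDumpOSInit() with PDumpOSDeInit() is sketched below. This is illustrative: in the driver the pairing is driven by the common PDump initialisation code, and the local variable names are examples.

static PVRSRV_ERROR ExampleConnectPDump(void)
{
	PDUMP_CHANNEL sParam = { 0 };
	PDUMP_CHANNEL sScript = { 0 };
	IMG_UINT32 ui32InitCapMode;
	IMG_CHAR *pszEnvComment;
	PVRSRV_ERROR eError;

	eError = PDumpOSInit(&sParam, &sScript, &ui32InitCapMode, &pszEnvComment);
	if (eError != PVRSRV_OK)
	{
		/* PDumpOSInit() cleans up after itself on failure. */
		return eError;
	}

	/* ... PDump streams are now connected and usable ... */

	PDumpOSDeInit(&sParam, &sScript);
	return PVRSRV_OK;
}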
+
+
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript)
+{
+ gpfnDbgDrv->pfnDestroyStream(psScript->hInit, psScript->hMain, psScript->hDeinit);
+ gpfnDbgDrv->pfnDestroyStream(psParam->hInit, psParam->hMain, psParam->hDeinit);
+
+ if(gsDBGPdumpState.pszFile)
+ {
+ OSFreeMem(gsDBGPdumpState.pszFile);
+ gsDBGPdumpState.pszFile = NULL;
+ }
+
+ if(gsDBGPdumpState.pszScript)
+ {
+ OSFreeMem(gsDBGPdumpState.pszScript);
+ gsDBGPdumpState.pszScript = NULL;
+ }
+
+ if(gsDBGPdumpState.pszMsg)
+ {
+ OSFreeMem(gsDBGPdumpState.pszMsg);
+ gsDBGPdumpState.pszMsg = NULL;
+ }
+
+ gpfnDbgDrv = NULL;
+}
+
+PVRSRV_ERROR PDumpOSCreateLock(void)
+{
+ mutex_init(&gsPDumpMutex);
+ return PVRSRV_OK;
+}
+
+void PDumpOSDestroyLock(void)
+{
+ /* no destruction work to be done, just assert
+ * the lock is not held */
+ PVR_ASSERT(mutex_is_locked(&gsPDumpMutex) == 0);
+}
+
+void PDumpOSLock(void)
+{
+ mutex_lock(&gsPDumpMutex);
+}
+
+void PDumpOSUnlock(void)
+{
+ mutex_unlock(&gsPDumpMutex);
+}
+
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream,
+ IMG_UINT32 ui32StateID)
+{
+ return (gpfnDbgDrv->pfnGetCtrlState((PDBG_STREAM)hDbgStream, ui32StateID));
+}
+
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame)
+{
+ gpfnDbgDrv->pfnSetFrame(ui32Frame);
+ return;
+}
+
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+ return (bInitClient);
+}
+
+#if defined(PVR_TESTING_UTILS)
+void PDumpOSDumpState(void);
+
+void PDumpOSDumpState(void)
+{
+ PVR_LOG(("---- PDUMP LINUX: gpfnDbgDrv( %p ) gpfnDbgDrv.ui32Size( %d )",
+ gpfnDbgDrv, gpfnDbgDrv->ui32Size));
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState( %p )",
+ &gsDBGPdumpState));
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[0]( %p )",
+ gsDBGPdumpState.psStream[0]));
+
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[0], 0xFE);
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[1]( %p )",
+ gsDBGPdumpState.psStream[1]));
+
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFE);
+
+ /* Now dump non-stream specific info */
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFF);
+}
+#endif
+
+#endif /* #if defined (PDUMP) */
+/*****************************************************************************
+ End of file (PDUMP.C)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#include "img_types.h"
+#include "services_km.h"
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+#define PDUMP_FLAGS_NONE 0x00000000UL /*!< Output this entry with no special treatment i.e. output only if in frame range */
+
+#define PDUMP_FLAGS_DEINIT 0x20000000UL /*!< Output this entry to the de-initialisation section */
+
+#define PDUMP_FLAGS_POWER 0x08000000UL /*!< Output this entry even when a power transition is ongoing */
+
+#define PDUMP_FLAGS_CONTINUOUS PDUMP_CONT /*!< Defined in services_km.h */
+
+#define PDUMP_FLAGS_PERSISTENT 0x80000000UL /*!< Output this entry always regardless of app and range,
+ used by persistent processes e.g. compositor, window mgr etc. */
+
+#define PDUMP_FLAGS_DEBUG 0x00010000U /*!< For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW 0x00000001U /* For internal use: Skip sending instructions to the hardware */
+
+#define PDUMP_FILEOFFSET_FMTSPEC "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+#define PDUMP_PARAM_CHANNEL_NAME "ParamChannel2"
+#define PDUMP_SCRIPT_CHANNEL_NAME "ScriptChannel2"
+
+#define PDUMP_CHANNEL_PARAM 0
+#define PDUMP_CHANNEL_SCRIPT 1
+#define PDUMP_NUM_CHANNELS 2
+
+#define PDUMP_PARAM_0_FILE_NAME "%%0%%.prm" /*!< Initial Param filename used in PDump capture */
+#define PDUMP_PARAM_N_FILE_NAME "%%0%%_%02u.prm" /*!< Param filename used when PRM file split */
+#define PDUMP_PARAM_MAX_FILE_NAME 32 /*!< Max Size of parameter name used in out2.txt */
+
+#define PDUMP_IS_CONTINUOUS(flags) ((flags & PDUMP_FLAGS_CONTINUOUS) != 0)
+
+#endif /* _SERVICES_PDUMP_H_ */
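As a small illustration of how the flag bits above combine and are tested (the function and variable names are examples, not part of the header):

static IMG_BOOL ExampleEntryAlwaysEmitted(void)
{
	/* Emit during power transitions and regardless of the frame range. */
	PDUMP_FLAGS_T uiFlags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER;

	return PDUMP_IS_CONTINUOUS(uiFlags) ? IMG_TRUE : IMG_FALSE;
}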
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Common Server PDump functions layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+
+#if defined(PDUMP)
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pdump_physmem.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "services_km.h"
+/* pdump headers */
+#include "dbgdrvif_srv5.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+/* Allow temporary buffer size override */
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+/* DEBUG */
+#if 0
+#define PDUMP_DBG(a) PDumpOSDebugPrintf (a)
+#else
+#define PDUMP_DBG(a)
+#endif
+
+
+#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define VPTR_PLUS(p, x) PTR_PLUS(void *, p, x)
+#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS (32)
+static void *gpvTempBuffer = NULL;
+
+#define PRM_FILE_SIZE_MAX 0x7FDFFFFFU /*!< Default maximum file size to split output files, 2GB-2MB as fwrite limits it to 2GB-1 on 32bit systems */
+#define FRAME_UNSET 0xFFFFFFFFU /*!< Used to signify no or invalid frame number */
+
+
+static IMG_BOOL g_PDumpInitialised = IMG_FALSE;
+static IMG_UINT32 g_ConnectionCount = 0;
+
+
+typedef struct
+{
+ PDUMP_CHANNEL sCh; /*!< Channel handles */
+} PDUMP_SCRIPT;
+
+typedef struct
+{
+ IMG_UINT32 ui32Init; /*!< Count of bytes written to the init phase stream */
+ IMG_UINT32 ui32Main; /*!< Count of bytes written to the main stream */
+ IMG_UINT32 ui32Deinit; /*!< Count of bytes written to the deinit stream */
+} PDUMP_CHANNEL_WOFFSETS;
+
+typedef struct
+{
+ PDUMP_CHANNEL sCh; /*!< Channel handles */
+ PDUMP_CHANNEL_WOFFSETS sWOff; /*!< Channel file write offsets */
+ IMG_UINT32 ui32FileIdx; /*!< File index used when file size limit reached and a new file is started, parameter channel only */
+ IMG_UINT32 ui32MaxFileSize; /*!< Maximum file size for parameter files */
+
+ PDUMP_FILEOFFSET_T uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */
+ size_t uiZeroPageSize; /*!< Size of the zero page in the parameter file */
+ IMG_CHAR szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*!< PRM file name where the zero page was pdumped */
+} PDUMP_PARAMETERS;
+
+static PDUMP_SCRIPT g_PDumpScript = { { 0, 0, 0} };
+static PDUMP_PARAMETERS g_PDumpParameters = { { 0, 0, 0}, {0, 0, 0}, 0, PRM_FILE_SIZE_MAX};
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+IMG_UINT32 g_ui32EveryLineCounter = 1U;
+#endif
+
+#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG)
+#define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define PDUMP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* Prototype for the test/debug state dump routine used in debugging */
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState);
+#undef PDUMP_TRACE_STATE
+
+
+/*****************************************************************************/
+/* PDump Control Module Definitions */
+/*****************************************************************************/
+
+typedef struct _PDUMP_CAPTURE_RANGE_
+{
+ IMG_UINT32 ui32Start; /*!< Start frame number of range */
+ IMG_UINT32 ui32End; /*!< End frame number of range */
+ IMG_UINT32 ui32Interval; /*!< Frame sample rate interval */
+} PDUMP_CAPTURE_RANGE;
+
+/* No direct access to members from outside the control module - please */
+typedef struct _PDUMP_CTRL_STATE_
+{
+ IMG_BOOL bInitPhaseActive; /*!< State of driver initialisation phase */
+ IMG_UINT32 ui32Flags; /*!< Unused */
+
+ IMG_UINT32 ui32DefaultCapMode; /*!< Capture mode of the dump */
+ PDUMP_CAPTURE_RANGE sCaptureRange; /*!< The capture range for capture mode 'framed' */
+ IMG_UINT32 ui32CurrentFrame; /*!< Current frame number */
+
+ IMG_BOOL bCaptureOn; /*!< Current capture status, is current frame in range */
+ IMG_BOOL bSuspended; /*!< Suspend flag set on unrecoverable error */
+ IMG_BOOL bInPowerTransition; /*!< Device power transition state */
+ POS_LOCK hLock; /*!< Exclusive lock to this structure */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+ IMG_TRUE,
+ 0,
+
+ 0, /*!< Value obtained from OS PDump layer during initialisation */
+ {
+ FRAME_UNSET,
+ FRAME_UNSET,
+ 1
+ },
+ 0,
+
+ IMG_FALSE,
+ IMG_FALSE,
+ IMG_FALSE,
+ NULL
+};
+
+static PVRSRV_ERROR PDumpCtrlInit(IMG_UINT32 ui32InitCapMode)
+{
+ g_PDumpCtrl.ui32DefaultCapMode = ui32InitCapMode;
+ PVR_ASSERT(g_PDumpCtrl.ui32DefaultCapMode != 0);
+
+ /* Create the lock for the PDUMP_CTRL_STATE struct, which is shared between
+ the PDump client and the PDumping app; it serialises accesses from the two. */
+ PVR_LOGR_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ return PVRSRV_OK;
+}
+
+static void PDumpCtrlDeInit(void)
+{
+ if (g_PDumpCtrl.hLock)
+ {
+ OSLockDestroy(g_PDumpCtrl.hLock);
+ g_PDumpCtrl.hLock = NULL;
+ }
+}
+
+static INLINE void PDumpCtrlLockAcquire(void)
+{
+ OSLockAcquire(g_PDumpCtrl.hLock);
+}
+
+static INLINE void PDumpCtrlLockRelease(void)
+{
+ OSLockRelease(g_PDumpCtrl.hLock);
+}
+
+/**********************************************************************************************************
+ NOTE:
+ The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock to be acquired BEFORE they
+ are called. This is because the PDUMP_CTRL_STATE data is shared between the PDumping App and the
+ PDump client, hence exclusive access is required. The lock can be acquired and released with the
+ PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively.
+**********************************************************************************************************/
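+
+/* Illustrative usage sketch (names as defined in this file): callers outside
+ * this block bracket each PDumpCtrl*** call with the lock, e.g.
+ *     PDumpCtrlLockAcquire();
+ *     bCapturing = PDumpCtrlCaptureOn();
+ *     PDumpCtrlLockRelease();
+ */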
+
+static void PDumpCtrlUpdateCaptureStatus(void)
+{
+ if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+ {
+ if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sCaptureRange.ui32Start) &&
+ (g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sCaptureRange.ui32End) &&
+ (((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sCaptureRange.ui32Start) % g_PDumpCtrl.sCaptureRange.ui32Interval) == 0))
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+ }
+ else
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+ }
+ }
+ else if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS)
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+ }
+ else
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+ PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlSetCurrentFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+ }
+
+}
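+
+/* Worked example for the framed check above: with Start=2, End=10 and
+ * Interval=4, capture is enabled on frames 2, 6 and 10 only.
+ */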
+
+static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+ g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+ /* Mirror the value into the debug driver */
+ PDumpOSSetFrame(ui32Frame);
+
+ PDumpCtrlUpdateCaptureStatus();
+
+#if defined(PDUMP_TRACE_STATE)
+ PDumpCommonDumpState(IMG_FALSE);
+#endif
+}
+
+static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+{
+ PVR_ASSERT(ui32Interval > 0);
+ PVR_ASSERT(ui32End >= ui32Start);
+ PVR_ASSERT((ui32Mode == DEBUG_CAPMODE_FRAMED) || (ui32Mode == DEBUG_CAPMODE_CONTINUOUS));
+
+ /* Set the capture range to that supplied by the PDump client tool
+ */
+ g_PDumpCtrl.ui32DefaultCapMode = ui32Mode;
+ g_PDumpCtrl.sCaptureRange.ui32Start = ui32Start;
+ g_PDumpCtrl.sCaptureRange.ui32End = ui32End;
+ g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval;
+
+ /* Reset the current frame when the capture range is reset; this helps to
+ * avoid inter-pdump start frame issues when the driver is not reloaded.
+ * There is no need to call PDumpCtrlUpdateCaptureStatus() directly, as the
+ * set-current-frame call below does it.
+ */
+ PDumpCtrlSetCurrentFrame(0);
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsFramed(void)
+{
+ return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsContinuous(void)
+{
+ return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS;
+}
+
+static IMG_UINT32 PDumpCtrlGetCurrentFrame(void)
+{
+ return g_PDumpCtrl.ui32CurrentFrame;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureOn(void)
+{
+ return !g_PDumpCtrl.bSuspended && g_PDumpCtrl.bCaptureOn;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(void)
+{
+ return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End);
+}
+
+/* Used to imply if the PDump client is connected or not. */
+static INLINE IMG_BOOL PDumpCtrlCaptureRangeUnset(void)
+{
+ return ((g_PDumpCtrl.sCaptureRange.ui32Start == FRAME_UNSET) &&
+ (g_PDumpCtrl.sCaptureRange.ui32End == FRAME_UNSET));
+}
+
+static IMG_BOOL PDumpCtrlIsLastCaptureFrame(void)
+{
+ if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+ {
+ /* Is the next capture frame within the range end limit? */
+ if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sCaptureRange.ui32Interval) > g_PDumpCtrl.sCaptureRange.ui32End)
+ {
+ return IMG_TRUE;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpCtrIsLastCaptureFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+ }
+
+ /* Return false for continuous capture mode, or for framed mode when the next capture frame is still within the range end */
+ return IMG_FALSE;
+}
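+
+/* Example: with End=10 and Interval=4, a current frame of 10 is reported as
+ * the last capture frame since the next candidate frame (10 + 4 = 14) is
+ * beyond the range end.
+ */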
+
+static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(void)
+{
+ return !g_PDumpCtrl.bInitPhaseActive;
+}
+
+static INLINE void PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete)
+{
+ if (bIsComplete)
+ {
+ g_PDumpCtrl.bInitPhaseActive = IMG_FALSE;
+ PDUMP_HEREA(102);
+ }
+ else
+ {
+ g_PDumpCtrl.bInitPhaseActive = IMG_TRUE;
+ PDUMP_HEREA(103);
+ }
+}
+
+static INLINE void PDumpCtrlSuspend(void)
+{
+ PDUMP_HEREA(104);
+ g_PDumpCtrl.bSuspended = IMG_TRUE;
+}
+
+static INLINE void PDumpCtrlResume(void)
+{
+ PDUMP_HEREA(105);
+ g_PDumpCtrl.bSuspended = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsDumpSuspended(void)
+{
+ return g_PDumpCtrl.bSuspended;
+}
+
+static INLINE void PDumpCtrlPowerTransitionStart(void)
+{
+ g_PDumpCtrl.bInPowerTransition = IMG_TRUE;
+}
+
+static INLINE void PDumpCtrlPowerTransitionEnd(void)
+{
+ g_PDumpCtrl.bInPowerTransition = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInPowerTransition(void)
+{
+ return g_PDumpCtrl.bInPowerTransition;
+}
+
+static PVRSRV_ERROR PDumpCtrlIsCaptureFrame(IMG_BOOL *bIsCapturing)
+{
+ *bIsCapturing = PDumpCtrlCaptureOn();
+ return PVRSRV_OK;
+}
+
+/********************************************************************************
+ End of PDumpCtrl*** functions
+*********************************************************************************/
+
+/*
+ Wrapper functions exposed in pdump_km.h for safe use by other pdump_***
+ modules. Each wrapper acquires the PDUMP_CTRL_STATE lock before calling the
+ corresponding PDumpCtrl layer function, so callers do not need to manage the
+ CtrlLock acquire/release themselves.
+*/
+
+void PDumpPowerTransitionStart(void)
+{
+ PDumpCtrlLockAcquire();
+ PDumpCtrlPowerTransitionStart();
+ PDumpCtrlLockRelease();
+}
+
+void PDumpPowerTransitionEnd(void)
+{
+ PDumpCtrlLockAcquire();
+ PDumpCtrlPowerTransitionEnd();
+ PDumpCtrlLockRelease();
+}
+
+IMG_BOOL PDumpInPowerTransition(void)
+{
+ IMG_BOOL bPDumpInPowerTransition = IMG_FALSE;
+
+ PDumpCtrlLockAcquire();
+ bPDumpInPowerTransition = PDumpCtrlInPowerTransition();
+ PDumpCtrlLockRelease();
+
+ return bPDumpInPowerTransition;
+}
+
+IMG_BOOL PDumpIsDumpSuspended(void)
+{
+ IMG_BOOL bPDumpIsDumpSuspended;
+
+ PDumpCtrlLockAcquire();
+ bPDumpIsDumpSuspended = PDumpCtrlIsDumpSuspended();
+ PDumpCtrlLockRelease();
+
+ return bPDumpIsDumpSuspended;
+}
+
+/*****************************************************************************/
+/* PDump Common Write Layer just above PDump OS Layer */
+/*****************************************************************************/
+
+
+/*
+ Checks in this method were seeded from the original PDumpWriteILock()
+ and DBGDrivWriteCM() and have grown since to ensure PDump output
+ matches legacy output.
+ Note: the order of the checks in this method is important as some
+ writes have multiple pdump flags set!
+ */
+static IMG_BOOL PDumpWriteAllowed(IMG_UINT32 ui32Flags)
+{
+ /* Lock down the PDUMP_CTRL_STATE struct before calling the following
+ PDumpCtrl*** functions. This is to avoid updates to the Control data
+ while we are reading from it */
+ PDumpCtrlLockAcquire();
+
+ /* No writes if in framed mode and the capture range has passed */
+ if (PDumpCtrlCaptureRangePast())
+ {
+ PDUMP_HERE(10);
+ goto unlockAndReturnFalse;
+ }
+
+ /* No writes while writing is suspended */
+ if (PDumpCtrlIsDumpSuspended())
+ {
+ PDUMP_HERE(11);
+ goto unlockAndReturnFalse;
+ }
+
+ /* Prevent PDumping during a power transition */
+ if (PDumpCtrlInPowerTransition())
+ { /* except when it's flagged */
+ if (ui32Flags & PDUMP_FLAGS_POWER)
+ {
+ PDUMP_HERE(20);
+ goto unlockAndReturnTrue;
+ }
+ PDUMP_HERE(16);
+ goto unlockAndReturnFalse;
+ }
+
+ /* Always allow dumping in init phase and when persistent flagged */
+ if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PDUMP_HERE(12);
+ goto unlockAndReturnTrue;
+ }
+ if (!PDumpCtrlInitPhaseComplete())
+ {
+ PDUMP_HERE(15);
+ goto unlockAndReturnTrue;
+ }
+
+ /* The following checks are made when the driver has completed initialisation */
+
+ /* If PDump client connected allow continuous flagged writes */
+ if (PDUMP_IS_CONTINUOUS(ui32Flags))
+ {
+ if (PDumpCtrlCaptureRangeUnset()) /* Is client connected? */
+ {
+ PDUMP_HERE(13);
+ goto unlockAndReturnFalse;
+ }
+ PDUMP_HERE(14);
+ goto unlockAndReturnTrue;
+ }
+
+ /* No deinit statements allowed once the initialisation phase is complete.
+ * Note: the continuous check above has already returned for continuous
+ * writes, so this test must look at the DEINIT flag itself. */
+ if (ui32Flags & PDUMP_FLAGS_DEINIT)
+ {
+ if (PDumpCtrlInitPhaseComplete())
+ {
+ PDUMP_HERE(17);
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteAllowed: DEINIT flag used at the wrong time outside of initialisation!"));
+ goto unlockAndReturnFalse;
+ }
+ }
+
+ /*
+ If no flags are provided then this is FRAMED output and the frame
+ range must be checked to match the expected behaviour.
+ */
+ if (PDumpCtrlCapModIsFramed() && !PDumpCtrlCaptureOn())
+ {
+ PDUMP_HERE(18);
+ goto unlockAndReturnFalse;
+ }
+
+ PDUMP_HERE(19);
+
+unlockAndReturnTrue:
+ /* Allow the write to take place */
+ PDumpCtrlLockRelease();
+ return IMG_TRUE;
+
+unlockAndReturnFalse:
+ PDumpCtrlLockRelease();
+ return IMG_FALSE;
+}
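+
+/* Summary of the precedence applied above: the range-past and suspended
+ * checks veto everything; the power-transition check only allows
+ * POWER-flagged writes; persistent and init-phase writes are always allowed;
+ * continuous writes require a connected client; deinit-flagged writes are
+ * only expected during initialisation; anything else is framed output and is
+ * gated on the current capture status.
+ */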
+
+#undef PDUMP_DEBUG_SCRIPT_LINES
+
+#if defined(PDUMP_DEBUG_SCRIPT_LINES)
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) _PDumpOSDebugDriverWrite(a,b,c,d)
+static IMG_UINT32 _PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount,
+ IMG_UINT32 ui32Flags)
+{
+ IMG_CHAR tmp1[80];
+ IMG_CHAR* streamName = "unkn";
+
+ if (g_PDumpScript.sCh.hDeinit == psStream)
+ streamName = "dein";
+ else if (g_PDumpScript.sCh.hInit == psStream)
+ streamName = "init";
+ else if (g_PDumpScript.sCh.hMain == psStream)
+ streamName = "main";
+
+ (void) PDumpOSSprintf(tmp1, 80, "-- %s, %x\n", streamName, ui32Flags);
+ (void) PDumpOSDebugDriverWrite(psStream, tmp1, OSStringLength(tmp1));
+
+ return PDumpOSDebugDriverWrite(psStream, pui8Data, ui32BCount);
+}
+#else
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) PDumpOSDebugDriverWrite(a,b,c)
+#endif
+
+
+/**************************************************************************/ /*!
+ @Function PDumpWriteToBuffer
+ @Description Write the supplied data to the PDump stream buffer, handling
+ any buffer-full conditions so that all of the requested data
+ is written.
+
+ @Input psStream The address of the PDump stream buffer to write to
+ @Input pui8Data Pointer to the data to be written
+ @Input ui32BCount Number of bytes to write
+ @Input ui32Flags PDump statement flags.
+
+ @Return IMG_UINT32 Actual number of bytes written, may be less than
+ ui32BCount when buffer full condition could not
+ be avoided.
+*/ /***************************************************************************/
+static IMG_UINT32 PDumpWriteToBuffer(IMG_HANDLE psStream, IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32BytesWritten = 0;
+ IMG_UINT32 ui32Off = 0;
+
+ while (ui32BCount > 0)
+ {
+ ui32BytesWritten = PDUMPOSDEBUGDRIVERWRITE(psStream, &pui8Data[ui32Off], ui32BCount, ui32Flags);
+
+ if (ui32BytesWritten == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: Zero bytes written - release execution"));
+ PDumpOSReleaseExecution();
+ }
+
+ if (ui32BytesWritten != 0xFFFFFFFFU)
+ {
+ if (ui32BCount != ui32BytesWritten)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: partial write of %d bytes of %d bytes", ui32BytesWritten, ui32BCount));
+ }
+ ui32Off += ui32BytesWritten;
+ ui32BCount -= ui32BytesWritten;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: Unrecoverable error received from the debug driver"));
+ if( PDumpOSGetCtrlState(psStream, DBG_GET_STATE_FLAG_IS_READONLY) )
+ {
+ /* Fatal - suspend PDump to prevent flooding the kernel log buffer */
+ PVR_LOG(("PDump suspended, debug driver out of memory"));
+ /*
+ Acquire the control lock before updating the "suspended" state. Strictly,
+ this may not be required because this is the context that checks the
+ "suspended" state in PDumpWriteAllowed before calling this function, so the
+ update mainly matters for other contexts. Those contexts ought to be waiting
+ on the bridge lock first and then the PDUMP_OSLOCK (to pdump into the script
+ or parameter buffer). However, the acquire may still be useful in case the
+ PDump call is made from a direct bridge call.
+ */
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSuspend();
+ PDumpCtrlLockRelease();
+ }
+ return 0;
+ }
+ }
+
+ /* All data consumed; return the total number of bytes written */
+ ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0;
+
+ return ui32BCount;
+}
+
+
+/**************************************************************************/ /*!
+ @Function PDumpWriteToChannel
+ @Description Write the supplied data to the PDump channel specified obeying
+ flags to write to the necessary channel buffers.
+
+ @Input psChannel The address of the script or parameter channel object
+ @Input/Output psWOff The address of the channel write offsets object to
+ update on successful writing
+ @Input pui8Data Pointer to the data to be written
+ @Input ui32Size Number of bytes to write
+ @Input ui32Flags PDump statement flags, they may be clear (no flags)
+ which implies framed data, continuous flagged,
+ persistent flagged, or continuous AND persistent
+ flagged and they determine how the data is output.
+ On the first test app run after driver load, the
+ Display Controller dumps a resource that is both
+ continuous and persistent and this needs writing to
+ both the init (persistent) and main (continuous)
+ channel buffers to ensure the data is dumped in
+ subsequent test runs without reloading the driver.
+ In subsequent runs the PDump client 'freezes' the
+ init buffer so that only one dump of persistent data
+ for the "extended init phase" is captured to the
+ init buffer.
+
+ @Return IMG_BOOL True when the data has been consumed, false otherwise
+*/ /***************************************************************************/
+static IMG_BOOL PDumpWriteToChannel(PDUMP_CHANNEL* psChannel, PDUMP_CHANNEL_WOFFSETS* psWOff,
+ IMG_UINT8* pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32BytesWritten = 0;
+
+ PDUMP_HERE(210);
+
+ /* Dump data to deinit buffer when flagged as deinit */
+ if (ui32Flags & PDUMP_FLAGS_DEINIT)
+ {
+ PDUMP_HERE(211);
+ ui32BytesWritten = PDumpWriteToBuffer(psChannel->hDeinit, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(212);
+ return IMG_FALSE;
+ }
+
+ if (psWOff)
+ {
+ psWOff->ui32Deinit += ui32Size;
+ }
+
+ }
+ else
+ {
+ IMG_BOOL bDumpedToInitAlready = IMG_FALSE;
+ IMG_HANDLE* phStream = NULL;
+ IMG_UINT32* pui32Offset = NULL;
+
+ /* Always append persistent data to init phase so it's available on
+ * subsequent app runs, but also to the main stream if client connected */
+ if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PDUMP_HERE(213);
+ ui32BytesWritten = PDumpWriteToBuffer( psChannel->hInit, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(214);
+ return IMG_FALSE;
+ }
+
+ bDumpedToInitAlready = IMG_TRUE;
+ if (psWOff)
+ {
+ psWOff->ui32Init += ui32Size;
+ }
+
+ /* Don't write continuous data if client not connected */
+ PDumpCtrlLockAcquire();
+ if (PDUMP_IS_CONTINUOUS(ui32Flags) && PDumpCtrlCaptureRangeUnset())
+ {
+ PDumpCtrlLockRelease();
+ return IMG_TRUE;
+ }
+ PDumpCtrlLockRelease();
+ }
+
+ /* Prepare to write the data to the main stream for
+ * persistent, continuous or framed data. Override and use init
+ * stream if driver still in init phase and we have not written
+ * to it yet.*/
+ PDumpCtrlLockAcquire();
+ if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready)
+ {
+ PDUMP_HERE(215);
+ phStream = &psChannel->hInit;
+ if (psWOff)
+ {
+ pui32Offset = &psWOff->ui32Init;
+ }
+ }
+ else
+ {
+ PDUMP_HERE(216);
+ phStream = &psChannel->hMain;
+ if (psWOff)
+ {
+ pui32Offset = &psWOff->ui32Main;
+ }
+ }
+ PDumpCtrlLockRelease();
+
+ /* Write the data to the stream */
+ ui32BytesWritten = PDumpWriteToBuffer(*phStream, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(217);
+ return IMG_FALSE;
+ }
+
+ if (pui32Offset)
+ {
+ *pui32Offset += ui32BytesWritten;
+ }
+ }
+
+ return IMG_TRUE;
+}
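+
+/* Routing summary for the function above: DEINIT-flagged data goes to the
+ * deinit stream. PERSISTENT-flagged data always goes to the init stream and
+ * is normally also written to the main stream, except when it is
+ * CONTINUOUS-flagged and no client is connected. All other data goes to the
+ * init stream while the driver is still in its init phase and to the main
+ * stream thereafter.
+ */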
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+static IMG_UINT32 _GenerateChecksum(void *pvData, size_t uiSize)
+{
+ IMG_UINT32 ui32Sum = 0;
+ IMG_UINT32 *pui32Data = pvData;
+ IMG_UINT8 *pui8Data = pvData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32LeftOver;
+
+ for(i = 0; i < uiSize / sizeof(IMG_UINT32); i++)
+ {
+ ui32Sum += pui32Data[i];
+ }
+
+ ui32LeftOver = uiSize % sizeof(IMG_UINT32);
+
+ while(ui32LeftOver)
+ {
+ ui32Sum += pui8Data[uiSize - ui32LeftOver];
+ ui32LeftOver--;
+ }
+
+ return ui32Sum;
+}
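+
+/* Worked example (assuming a little-endian layout): for the 6-byte buffer
+ * { 0x01, 0x00, 0x00, 0x00, 0x02, 0x03 } the word loop adds 0x00000001 and
+ * the two leftover bytes add 0x02 + 0x03, giving a checksum of 0x06.
+ */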
+
+#endif
+
+PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags,
+ IMG_UINT32* pui32FileOffset, IMG_CHAR* aszFilenameStr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE;
+
+ PVR_ASSERT(pui8Data && (ui32Size!=0));
+ PVR_ASSERT(pui32FileOffset && aszFilenameStr);
+
+ PDUMP_HERE(1);
+
+ if (!PDumpWriteAllowed(ui32Flags))
+ {
+ /* Abort the write for the above reason, but return a specific error so
+ * the caller knows what happened; the caller should treat it as OK and
+ * simply skip any related PDump writes to the script file, avoiding any
+ * disruption to the driver. */
+ return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+ }
+
+ PDUMP_HERE(2);
+
+ PDumpCtrlLockAcquire();
+ bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete();
+ PDumpCtrlLockRelease();
+
+ if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT))
+ {
+ PDUMP_HERE(3);
+
+ /* Init phase stream not expected to get above the file size max */
+ PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize);
+
+ /* Return the file write offset at which the parameter data was dumped */
+ *pui32FileOffset = g_PDumpParameters.sWOff.ui32Init;
+ }
+ else
+ {
+ PDUMP_HERE(4);
+
+ /* Do we need to signal the PDump client that a split is required? */
+ if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize)
+ {
+ PDUMP_HERE(5);
+ PDumpOSSetSplitMarker(g_PDumpParameters.sCh.hMain, g_PDumpParameters.sWOff.ui32Main);
+ g_PDumpParameters.ui32FileIdx++;
+ g_PDumpParameters.sWOff.ui32Main = 0;
+ }
+
+ /* Return the file write offset at which the parameter data was dumped */
+ *pui32FileOffset = g_PDumpParameters.sWOff.ui32Main;
+ }
+
+ /* Create the parameter file name, based on index, to be used in the script */
+ if (g_PDumpParameters.ui32FileIdx == 0)
+ {
+ eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_0_FILE_NAME);
+ }
+ else
+ {
+ PDUMP_HERE(6);
+ eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx);
+ }
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSSprintf", errExit);
+
+ /* Write the parameter data to the parameter channel */
+ eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ if (!PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, pui8Data, ui32Size, ui32Flags))
+ {
+ PDUMP_HERE(7);
+ PVR_LOGG_IF_ERROR(eError, "PDumpWrite", errExit);
+ }
+#if defined(PDUMP_DEBUG_OUTFILES)
+ else
+ {
+ IMG_UINT32 ui32Checksum;
+ PDUMP_GET_SCRIPT_STRING();
+
+ ui32Checksum = _GenerateChecksum(pui8Data, ui32Size);
+
+ /* CHK CHKSUM SIZE PRMOFFSET PRMFILE */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- CHK 0x%08X 0x%08X 0x%08X %s",
+ ui32Checksum,
+ ui32Size,
+ *pui32FileOffset,
+ aszFilenameStr);
+ if(eError != PVRSRV_OK)
+ {
+ goto errExit;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ }
+#endif
+
+ return PVRSRV_OK;
+
+errExit:
+ return eError;
+}
+
+
+IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags)
+{
+ PVR_ASSERT(hString);
+
+ PDUMP_HERE(201);
+
+ if (!PDumpWriteAllowed(ui32Flags))
+ {
+ /* Abort the write for the above reasons, but indicate success to the
+ * caller to avoid disrupting the driver */
+ return IMG_TRUE;
+ }
+
+ return PDumpWriteToChannel(&g_PDumpScript.sCh, NULL, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags);
+}
+
+
+/*****************************************************************************/
+
+
+
+
+
+
+struct _PDUMP_CONNECTION_DATA_ {
+ IMG_UINT32 ui32RefCount;
+ POS_LOCK hLock;
+ DLLIST_NODE sListHead;
+ IMG_BOOL bLastInto;
+ IMG_UINT32 ui32LastSetFrameNumber;
+ IMG_BOOL bWasInCaptureRange;
+ IMG_BOOL bIsInCaptureRange;
+ IMG_BOOL bLastTransitionFailed;
+ SYNC_CONNECTION_DATA *psSyncConnectionData;
+};
+
+static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psPDumpConnectionData->hLock);
+ ui32RefCount = ++psPDumpConnectionData->ui32RefCount;
+ OSLockRelease(psPDumpConnectionData->hLock);
+
+ PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+ __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+
+ return psPDumpConnectionData;
+}
+
+static void _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psPDumpConnectionData->hLock);
+ ui32RefCount = --psPDumpConnectionData->ui32RefCount;
+ OSLockRelease(psPDumpConnectionData->hLock);
+
+ if (ui32RefCount == 0)
+ {
+ OSLockDestroy(psPDumpConnectionData->hLock);
+ PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead));
+ OSFreeMem(psPDumpConnectionData);
+ }
+
+ PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+ __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+}
+
+/**************************************************************************
+ * Function Name : GetTempBuffer
+ * Inputs : None
+ * Outputs : None
+ * Returns : Temporary buffer address, or NULL
+ * Description : Get temporary buffer address.
+**************************************************************************/
+static void *GetTempBuffer(void)
+{
+ /*
+ * Allocate the temporary buffer, if it hasn't been allocated already.
+ * Return the address of the temporary buffer, or NULL if it
+ * couldn't be allocated.
+ * It is expected that the buffer will be allocated once, at driver
+ * load time, and left in place until the driver unloads.
+ */
+
+ if (gpvTempBuffer == NULL)
+ {
+ gpvTempBuffer = OSAllocMem(PDUMP_TEMP_BUFFER_SIZE);
+ if (gpvTempBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed"));
+ }
+ }
+
+ return gpvTempBuffer;
+}
+
+static void FreeTempBuffer(void)
+{
+
+ if (gpvTempBuffer != NULL)
+ {
+ OSFreeMem(gpvTempBuffer);
+ gpvTempBuffer = NULL;
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpParameterChannelZeroedPageBlock
+ * Inputs : None
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Set up the zero page block in the parameter stream
+**************************************************************************/
+static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(void)
+{
+ IMG_UINT8 aui8Zero[32] = { 0 };
+ size_t uiBytesToWrite;
+ PVRSRV_ERROR eError;
+
+ g_PDumpParameters.uiZeroPageSize = OSGetPageSize();
+
+ /* Ensure the zero page size is a multiple of the zero source buffer on the stack */
+ PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0);
+
+ /* the first write gets the parameter file name and stream offset,
+ * then subsequent writes do not need to know this as the data is
+ * contiguous in the stream
+ */
+ PDUMP_LOCK();
+ eError = PDumpWriteParameter(aui8Zero,
+ sizeof(aui8Zero),
+ 0,
+ &g_PDumpParameters.uiZeroPageOffset,
+ g_PDumpParameters.szZeroPageFilename);
+
+ if(eError != PVRSRV_OK)
+ {
+ /* Also treat PVRSRV_ERROR_PDUMP_NOT_ALLOWED as an error in this case
+ * as it should never happen since all writes during driver Init are allowed.
+ */
+ goto err_write;
+ }
+
+ uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero);
+
+ while(uiBytesToWrite)
+ {
+ IMG_BOOL bOK;
+
+ bOK = PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff,
+ aui8Zero,
+ sizeof(aui8Zero), 0);
+
+ if(!bOK)
+ {
+ eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ goto err_write;
+ }
+
+ uiBytesToWrite -= sizeof(aui8Zero);
+ }
+
+err_write:
+ PDUMP_UNLOCK();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block"));
+ }
+
+ return eError;
+}
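+
+/* Sizing note for the routine above: with the 32-byte zero buffer and, for
+ * example, a 4 KB OS page, the initial PDumpWriteParameter() covers the
+ * first 32 bytes and the loop issues (4096 - 32) / 32 = 127 further writes
+ * to pad the block out to a full page.
+ */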
+
+/**************************************************************************
+ * Function Name : PDumpGetParameterZeroPageInfo
+ * Inputs : None
+ * Outputs : puiZeroPageOffset: will be set to the offset of the zero page
+ * : puiZeroPageSize: will be set to the size of the zero page
+ * : ppszZeroPageFilename: will be set to a pointer to the PRM file name
+ * : containing the zero page
+ * Returns : None
+ * Description : Get information about the zero page
+**************************************************************************/
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+ size_t *puiZeroPageSize,
+ const IMG_CHAR **ppszZeroPageFilename)
+{
+ *puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset;
+ *puiZeroPageSize = g_PDumpParameters.uiZeroPageSize;
+ *ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename;
+}
+
+PVRSRV_ERROR PDumpInitCommon(void)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32InitCapMode = 0;
+ IMG_CHAR* pszEnvComment = NULL;
+
+ PDUMP_HEREA(2010);
+
+ /* Allocate temporary buffer for copying from user space */
+ (void) GetTempBuffer();
+
+ /* create the global PDump lock */
+ eError = PDumpCreateLockKM();
+ PVR_LOGG_IF_ERROR(eError, "PDumpCreateLockKM", errExit);
+
+ /* Call environment specific PDump initialisation */
+ eError = PDumpOSInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &ui32InitCapMode, &pszEnvComment);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSInit", errExitLock);
+
+ /* Initialise PDump control module in common layer */
+ eError = PDumpCtrlInit(ui32InitCapMode);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCtrlInit", errExitOSDeInit);
+
+ /* Test PDump initialised and ready by logging driver details */
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Driver Product Version: %s - %s (%s)", PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+ if (pszEnvComment != NULL)
+ {
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "%s", pszEnvComment);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+ }
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Start of Init Phase");
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+
+ eError = PDumpParameterChannelZeroedPageBlock();
+ PVR_LOGG_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errExitCtrl);
+
+ g_PDumpInitialised = IMG_TRUE;
+
+ PDUMP_HEREA(2011);
+
+ return PVRSRV_OK;
+
+errExitCtrl:
+ PDumpCtrlDeInit();
+errExitOSDeInit:
+ PDUMP_HEREA(2018);
+ PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+errExitLock:
+ PDUMP_HEREA(2019);
+ PDumpDestroyLockKM();
+errExit:
+ return eError;
+}
+
+void PDumpDeInitCommon(void)
+{
+ PDUMP_HEREA(2020);
+
+ g_PDumpInitialised = IMG_FALSE;
+
+ /* Free temporary buffer */
+ FreeTempBuffer();
+
+ /* DeInit the PDUMP_CTRL_STATE data */
+ PDumpCtrlDeInit();
+
+ /* Call environment specific PDump Deinitialisation */
+ PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+
+ /* take down the global PDump lock */
+ PDumpDestroyLockKM();
+}
+
+IMG_BOOL PDumpReady(void)
+{
+ return g_PDumpInitialised;
+}
+
+void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+ /* Check with the OS layer we are running on */
+ if (PDumpOSAllowInitPhaseToComplete(bPDumpClient, bInitClient))
+ {
+ if (bInitClient)
+ {
+ /* We only output this once, on OSs where the init client ends the init phase */
+ PDUMPCOMMENT("Stop Init Phase");
+ }
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+ PDumpCtrlLockRelease();
+ }
+}
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+ PDumpCtrlLockAcquire();
+ *pbIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame();
+ PDumpCtrlLockRelease();
+
+ return PVRSRV_OK;
+}
+
+
+
+typedef struct _PDUMP_Transition_DATA_ {
+ PFN_PDUMP_TRANSITION pfnCallback;
+ void *hPrivData;
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+ DLLIST_NODE sNode;
+} PDUMP_Transition_DATA;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle)
+{
+ PDUMP_Transition_DATA *psData;
+ PVRSRV_ERROR eError;
+
+ psData = OSAllocMem(sizeof(*psData));
+ if (psData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ /* Setup the callback and add it to the list for this process */
+ psData->pfnCallback = pfnCallback;
+ psData->hPrivData = hPrivData;
+ dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode);
+
+ /* Take a reference on the connection so it doesn't get freed too early */
+ psData->psPDumpConnectionData =_PDumpConnectionAcquire(psPDumpConnectionData);
+ *ppvHandle = psData;
+
+ return PVRSRV_OK;
+
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+ PDUMP_Transition_DATA *psData = pvHandle;
+
+ dllist_remove_node(&psData->sNode);
+ _PDumpConnectionRelease(psData->psPDumpConnectionData);
+ OSFreeMem(psData);
+}
+
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ DLLIST_NODE *psNode, *psNext;
+ PVRSRV_ERROR eError;
+
+ /* Only call the callbacks if we've really done a Transition */
+ if (bInto != psPDumpConnectionData->bLastInto)
+ {
+ /* We're Transitioning either into or out of capture range */
+ dllist_foreach_node(&psPDumpConnectionData->sListHead, psNode, psNext)
+ {
+ PDUMP_Transition_DATA *psData =
+ IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode);
+
+ eError = psData->pfnCallback(psData->hPrivData, bInto, ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (bInto)
+ {
+ SyncConnectionPDumpSyncBlocks(psPDumpConnectionData->psSyncConnectionData);
+ }
+ psPDumpConnectionData->bLastInto = bInto;
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+ PDumpCtrlLockAcquire();
+ PDumpCtrlIsCaptureFrame(bIsCapturing);
+ PDumpCtrlLockRelease();
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32Frame)
+{
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData;
+ IMG_BOOL bWasInCaptureRange = IMG_FALSE;
+ IMG_BOOL bIsInCaptureRange = IMG_FALSE;
+ PVRSRV_ERROR eError;
+
+ /*
+ Note:
+ We cannot test whether the new frame will be in capture range before
+ we set the frame number, and we do not want to roll the frame number
+ back on failure. We therefore save the "transient" data that decides
+ whether we are entering or exiting capture range, along with a failure
+ boolean, so we know what is required on a retry.
+ */
+ if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame)
+ {
+ (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u", ui32Frame);
+
+ /*
+ The boolean values below decide whether the PDump transition should
+ trigger because of the current context setting the frame number; the
+ calls below must therefore execute atomically so that no other context
+ gets a chance to transition in between.
+ */
+ PDumpCtrlLockAcquire();
+
+ PDumpCtrlIsCaptureFrame(&bWasInCaptureRange);
+ PDumpCtrlSetCurrentFrame(ui32Frame);
+ PDumpCtrlIsCaptureFrame(&bIsInCaptureRange);
+
+ PDumpCtrlLockRelease();
+
+ psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame;
+
+ /* Save the Transition data in case we fail the Transition */
+ psPDumpConnectionData->bWasInCaptureRange = bWasInCaptureRange;
+ psPDumpConnectionData->bIsInCaptureRange = bIsInCaptureRange;
+ }
+ else if (psPDumpConnectionData->bLastTransitionFailed)
+ {
+ /* Load the Transition data so we can try again */
+ bWasInCaptureRange = psPDumpConnectionData->bWasInCaptureRange;
+ bIsInCaptureRange = psPDumpConnectionData->bIsInCaptureRange;
+ }
+ else
+ {
+ /* New frame is the same as the last frame set and the last
+ * transition succeeded, no need to perform another transition.
+ */
+ return PVRSRV_OK;
+ }
+
+ if (!bWasInCaptureRange && bIsInCaptureRange)
+ {
+ eError = PDumpTransition(psPDumpConnectionData, IMG_TRUE, PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_Transition;
+ }
+ }
+ else if (bWasInCaptureRange && !bIsInCaptureRange)
+ {
+ eError = PDumpTransition(psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_Transition;
+ }
+ }
+ else
+ {
+ /* Here both previous and current frames are in or out of range.
+ * There is no transition in this case.
+ */
+ }
+
+ psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+ return PVRSRV_OK;
+
+fail_Transition:
+ psPDumpConnectionData->bLastTransitionFailed = IMG_TRUE;
+ return eError;
+}
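+
+/* Transition summary for the function above: moving from out-of-range to
+ * in-range triggers PDumpTransition(..., IMG_TRUE, ...), moving from
+ * in-range to out-of-range triggers PDumpTransition(..., IMG_FALSE, ...),
+ * and staying on the same side of the range triggers no transition at all.
+ */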
+
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Frame)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PDUMP_TRACE_STATE)
+ PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame));
+#endif
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u (pre)", ui32Frame);
+#endif
+
+ eError = _PDumpSetFrameKM(psConnection, ui32Frame);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_LOG_ERROR(eError, "_PDumpSetFrameKM");
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u (post)", ui32Frame);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32* pui32Frame)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /*
+ It may be safe to avoid acquiring this lock here as all the other calls
+ which read/modify current frame will wait on the PDump Control bridge
+ lock first. Also, in no way as of now, does the PDumping app modify the
+ current frame through a call which acquires the global bridge lock.
+ Still, as a legacy we acquire and then read.
+ */
+ PDumpCtrlLockAcquire();
+
+ *pui32Frame = PDumpCtrlGetCurrentFrame();
+
+ PDumpCtrlLockRelease();
+ return eError;
+}
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ /*
+ Acquire PDUMP_CTRL_STATE struct lock before modifications as a
+ PDumping app may be reading the state data for some checks
+ */
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+ PDumpCtrlLockRelease();
+
+ if (ui32MaxParamFileSize == 0)
+ {
+ g_PDumpParameters.ui32MaxFileSize = PRM_FILE_SIZE_MAX;
+ }
+ else
+ {
+ g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize;
+ }
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpReg32
+ * Inputs : pszPDumpRegName, register offset, value and flags to write
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpReg32"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
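+
+/* Illustrative output (hypothetical register space name and values):
+ * PDumpReg32("SYSREG", 0x0100, 0xDEADBEEF, ui32Flags) emits the script line
+ *     WRW :SYSREG:0x00000100 0xDEADBEEF
+ */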
+
+
+/**************************************************************************
+ * Function Name : PDumpReg64
+ * Inputs : pszPDumpRegName, register offset, value and flags to write
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT64 ui64Data,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegKM"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010llX", pszPDumpRegName, ui32Reg, ui64Data);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToReg64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegDst,
+ IMG_UINT32 ui32RegSrc,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegLabelToReg64"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X :%s:0x%08X", pszPDumpRegName, ui32RegDst, pszPDumpRegName, ui32RegSrc);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToMem32
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegLabelToMem32"));
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+ uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToMem64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegLabelToMem64"));
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+ uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMemLabelToInternalVar
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents an internal var write using a memory label
+**************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToInternalVar(IMG_CHAR *pszInternalVar,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpMemLabelToInternalVar"));
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%llX", pszInternalVar,
+ aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PDumpWriteVarORValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical OR operation
+ Var <- Var OR Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarORValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR %s %s 0x%llX",
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
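+
+/* Illustrative output (hypothetical internal variable name):
+ * PDumpWriteVarORValueOp(":SYSMEM:$1", 0x10, ui32PDumpFlags) emits
+ *     OR :SYSMEM:$1 :SYSMEM:$1 0x10
+ */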
+
+
+/*******************************************************************************************************
+ * Function Name : PDumpRegLabelToInternalVar
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which writes a register label into an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegLabelToInternalVar"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszInternalVar, pszPDumpRegName, ui32Reg);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpInternalVarToReg32
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpInternalVarToReg32"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpInternalVarToReg64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpInternalVarToReg64"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+
+/*******************************************************************************************************
+ * Function Name : PDumpMemLabelToMem32
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpMemLabelToMem32"));
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:%s:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpMemLabelToMem64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpMemLabelToMem64"));
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:%s:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+
+/*!
+******************************************************************************
+
+ @Function PDumpWriteVarSHRValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical SHR operation
+ Var <- Var SHR Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR %s %s 0x%llX",
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PDumpWriteVarANDValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical AND operation
+ Var <- Var AND Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "AND %s %s 0x%llX",
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpSAW
+ * Inputs : pszDevSpaceName -- device space from which to output
+ * ui32HPOffsetBytes -- byte offset from the register base
+ * ui32NumSaveBytes -- number of bytes to output
+ * pszOutfileName -- name of file to output to
+ * ui32OutfileOffsetByte -- offset into output file to write
+ * uiPDumpFlags -- flags to pass to PDumpOSWriteScript
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Dumps the contents of a register bank into a file
+ * NB: ui32NumSaveBytes must be divisible by 4
+**************************************************************************/
+PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName,
+ IMG_UINT32 ui32HPOffsetBytes,
+ IMG_UINT32 ui32NumSaveBytes,
+ IMG_CHAR *pszOutfileName,
+ IMG_UINT32 ui32OutfileOffsetByte,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW\n"));
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAW :%s:0x%x 0x%x 0x%x %s\n",
+ pszDevSpaceName,
+ ui32HPOffsetBytes,
+ ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+ ui32OutfileOffsetByte,
+ pszOutfileName);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpOSBufprintf failed: eError=%u\n", eError));
+ PDUMP_UNLOCK();
+ return eError;
+ }
+
+ if(! PDumpWriteScript(hScript, uiPDumpFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!\n"));
+ }
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
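+
+/* Note on the SAW line emitted above: the third field is a word count, so
+ * the byte count supplied by the caller is divided by sizeof(IMG_UINT32);
+ * e.g. ui32NumSaveBytes = 256 is written out as 0x40 words.
+ */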
+
+
+/**************************************************************************
+ * Function Name : PDumpRegPolKM
+ * Inputs : Description of what this register read is trying to do
+ * pszPDumpRegName
+ * Register offset
+ * expected value
+ * mask for that value
+ * Outputs : None
+ * Returns : None
+ * Description : Create a PDUMP string which represents a register read
+ * with the expected value
+**************************************************************************/
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator)
+{
+ /* Timings correct for linux and XP */
+ /* Timings should be passed in */
+ #define POLL_DELAY 1000U
+ #define POLL_COUNT_LONG (2000000000U / POLL_DELAY)
+ #define POLL_COUNT_SHORT (1000000U / POLL_DELAY)
+
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PollCount;
+
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpRegPolKM"));
+
+ ui32PollCount = POLL_COUNT_LONG;
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d",
+ pszPDumpRegName, ui32RegAddr, ui32RegValue,
+ ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
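+/* Illustrative example (not part of the original source): assuming the
+ * placeholder bank name "RGXREG" and an operator that prints as 0,
+ *     PDumpRegPolKM("RGXREG", 0x80, 0x1, 0x1, ui32Flags, eOp);
+ * emits
+ *     POL :RGXREG:0x00000080 0x00000001 0x00000001 0 2000000 1000
+ * where 2000000 is POLL_COUNT_LONG (2000000000 / POLL_DELAY) and 1000 is
+ * POLL_DELAY.
+ */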
+
+/* Never call directly; the caller must hold the PDump lock.
+ * Use PDumpCommentWithFlags() from within the server.
+ * Clients call this via the bridge and PDumpCommentKM().
+ */
+static PVRSRV_ERROR _PDumpWriteComment(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+#if defined(PDUMP_DEBUG_OUTFILES)
+ IMG_CHAR pszTemp[256];
+#endif
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpCommentKM"));
+
+ if((pszComment == NULL) || (PDumpOSBuflen(pszComment, ui32MaxLen) == 0))
+ {
+ /* PDumpOSVerifyLineEnding silently fails if pszComment is too short to
+ actually hold the line endings that it's trying to enforce, so
+ short circuit it and force safety */
+ pszComment = "\n";
+ }
+ else
+ {
+ /* Put line ending sequence at the end if it isn't already there */
+ PDumpOSVerifyLineEnding(pszComment, ui32MaxLen);
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ /* Prefix comment with PID and line number */
+ eErr = PDumpOSSprintf(pszTemp, 256, "%u %u:%lu %s: %s",
+ g_ui32EveryLineCounter,
+ OSGetCurrentClientProcessIDKM(),
+ (unsigned long)OSGetCurrentClientThreadIDKM(),
+ OSGetCurrentClientProcessNameKM(),
+ pszComment);
+
+ /* Append the comment to the script stream */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+ pszTemp);
+#else
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+ pszComment);
+#endif
+ if( (eErr != PVRSRV_OK) &&
+ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+ {
+ PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrUnlock);
+ }
+
+ if (!PDumpWriteScript(hScript, ui32Flags))
+ {
+ if(PDUMP_IS_CONTINUOUS(ui32Flags))
+ {
+ eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+ }
+ else
+ {
+ eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+ }
+ }
+
+ErrUnlock:
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PDumpCommentKM
+ * Inputs : pszComment, ui32Flags
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Dumps a pre-formatted comment, primarily called from the
+ * : bridge.
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_LOCK();
+
+ eErr = _PDumpWriteComment(pszComment, ui32Flags);
+
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PDumpCommentWithFlags
+ * Inputs : ui32Flags - pdump flags
+ * : pszFormat - format string for comment
+ * : ... - args for format string
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ va_list args;
+
+ va_start(args, pszFormat);
+ eErr = PDumpCommentWithFlagsVA(ui32Flags, pszFormat, args);
+ va_end(args);
+
+ return eErr;
+}
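+/* Illustrative usage (not part of the original source): assuming a frame
+ * counter ui32Frame equal to 10,
+ *     PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Frame %u start", ui32Frame);
+ * is formatted by PDumpCommentWithFlagsVA() and written by
+ * _PDumpWriteComment() as the script line
+ *     -- Frame 10 start
+ * (plus a line-counter/PID prefix when PDUMP_DEBUG_OUTFILES is defined).
+ */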
+
+/**************************************************************************
+ * Function Name : PDumpCommentWithFlagsVA
+ * Inputs : ui32Flags - pdump flags
+ * : pszFormat - format string for comment
+ * : args - pre-started va_list args for format string
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, const IMG_CHAR * pszFormat, va_list args)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_MSG_STRING();
+
+ PDUMP_LOCK();
+
+ /* Construct the string */
+ eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, args);
+
+ if(eErr != PVRSRV_OK)
+ {
+ goto Unlock;
+ }
+
+ eErr = _PDumpWriteComment(pszMsg, ui32Flags);
+
+Unlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/*************************************************************************/ /*!
+ * Function Name : PDumpPanic
+ * Inputs : ui32PanicNo - Unique number for panic condition
+ * : pszPanicMsg - Panic reason message limited to ~90 chars
+ * : pszPPFunc - Function name string where panic occurred
+ * : ui32PPline - Source line number where panic occurred
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps a panic assertion. Used when the host driver
+ * : detects a condition that will lead to an invalid PDump
+ * : script that cannot be played back off-line.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+ IMG_CHAR pszConsoleMsg[] =
+"COM ***************************************************************************\n"
+"COM Script invalid and not compatible with off-line playback. Check test \n"
+"COM parameters and driver configuration, stop imminent.\n"
+"COM ***************************************************************************\n";
+ PDUMP_GET_SCRIPT_STRING();
+
+ /* Log the panic condition to the live kern.log in both REL and DEB mode
+ * to aid user PDump troubleshooting. */
+ PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg));
+ PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline));
+
+ /* Check the supplied panic reason string is within length limits */
+ PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+ /* Obtain lock to keep the multi-line
+ * panic statement together in a single atomic write */
+ PDUMP_LOCK();
+
+
+ /* Write -- Panic start (Function:line) */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write COM <message> x4 */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, pszConsoleMsg);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write PANIC no msg command */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write -- Panic end */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic end");
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+e1:
+ PDUMP_UNLOCK();
+
+ return eError;
+}
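+/* Illustrative output (not part of the original source): for a hypothetical
+ * panic number 0xF001 with message "FW boot failed", the code above emits,
+ * as one locked sequence:
+ *     -- Panic start (<function>:<line>)
+ *     COM *** ... ***   (the four-line console message above)
+ *     PANIC 0000f001 FW boot failed
+ *     -- Panic end
+ */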
+
+/*************************************************************************/ /*!
+ * Function Name : PDumpCaptureError
+ * Inputs : ui32ErrorNo - Unique number for the error condition
+ * : pszErrorMsg - Error reason message limited to ~90 chars
+ * : pszPPFunc - Function name string where the error occurred
+ * : ui32PPline - Source line number where the error occurred
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps an error string to the script file to interrupt
+ * : play back to inform user of a fatal issue that occurred
+ * : during PDump capture.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ IMG_CHAR* pszFormatStr = "DRIVER_ERROR: %3d: %s";
+ PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+ /* Need to return an error using this macro */
+ PDUMP_GET_SCRIPT_STRING();
+
+ /* Check the supplied error reason string is within length limits */
+ PVR_ASSERT(OSStringLength(pszErrorMsg)+OSStringLength(pszFormatStr) < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+ /* Obtain lock to keep the multi-line
+ * panic statement together in a single atomic write */
+ PDUMP_LOCK();
+
+ /* Write driver error message to the script file */
+ (void) PDumpOSBufprintf(hScript, ui32MaxLen, pszFormatStr, ui32ErrorNo, pszErrorMsg);
+ (void) PDumpWriteScript(hScript, uiPDumpFlags);
+
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PDumpBitmapKM
+
+ @Description
+
+ Dumps a bitmap from device memory to a file
+
+ @Input psDeviceNode
+ @Input pszFileName
+ @Input ui32FileOffset
+ @Input ui32Width
+ @Input ui32Height
+ @Input ui32StrideInBytes
+ @Input sDevBaseAddr
+ @Input ui32MMUContextID
+ @Input ui32Size
+ @Input ePixelFormat
+ @Input ui32AddrMode
+ @Input ui32PDumpFlags
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpBitmapKM( PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+ PVRSRV_ERROR eErr=0;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "Dump bitmap of render.");
+
+ switch (ePixelFormat)
+ {
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV8:
+ {
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV data. Switching from SII to SAB. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin\n",
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ pszFileName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+ case PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8: // YUV420 2 planes
+ {
+ const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+ const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>1; // YUV420
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 2-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // Context id
+ sDevBaseAddr.uiAddr, // virtaddr
+ ui32Plane0Size, // size
+ ui32FileOffset, // fileoffset
+
+ // Plane 1 (UV)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // Context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32Plane1Size, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12: // YUV420 3 planes
+ {
+ const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+ const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>2; // YUV420
+ const IMG_UINT32 ui32Plane2Size = ui32Plane1Size;
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+ const IMG_UINT32 ui32Plane2FileOffset = ui32Plane1FileOffset + ui32Plane1Size;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+ const IMG_UINT32 ui32Plane2MemOffset = ui32Plane0Size+ui32Plane1Size;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 3-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr, // virtaddr
+ ui32Plane0Size, // size
+ ui32FileOffset, // fileoffset
+
+ // Plane 1 (U)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32Plane1Size, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ // Plane 2 (V)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
+ ui32Plane2Size, // size
+ ui32Plane2FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32: // YV32 - 4 contiguous planes in the order VUYA, stride can be > width.
+ {
+ const IMG_UINT32 ui32PlaneSize = ui32StrideInBytes*ui32Height; // All 4 planes are the same size
+ const IMG_UINT32 ui32Plane0FileOffset = ui32FileOffset + (ui32PlaneSize<<1); // SII plane 0 is Y, which is YV32 plane 2
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32PlaneSize; // SII plane 1 is U, which is YV32 plane 1
+ const IMG_UINT32 ui32Plane2FileOffset = ui32FileOffset; // SII plane 2 is V, which is YV32 plane 0
+ const IMG_UINT32 ui32Plane3FileOffset = ui32Plane0FileOffset + ui32PlaneSize; // SII plane 3 is A, which is YV32 plane 3
+ const IMG_UINT32 ui32Plane0MemOffset = ui32PlaneSize<<1;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32PlaneSize;
+ const IMG_UINT32 ui32Plane2MemOffset = 0;
+ const IMG_UINT32 ui32Plane3MemOffset = ui32Plane0MemOffset + ui32PlaneSize;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 4 planes. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 plane size is 0x%08X", ui32PlaneSize);
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 0 Mem Offset=0x%08X", ui32Plane0MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 1 Mem Offset=0x%08X", ui32Plane1MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 2 Mem Offset=0x%08X", ui32Plane2MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 3 Mem Offset=0x%08X", ui32Plane3MemOffset);
+
+ /*
+ SII <imageset> <filename> :<memsp1>:v<id1>:<virtaddr1> <size1> <fileoffset1> Y
+ :<memsp2>:v<id2>:<virtaddr2> <size2> <fileoffset2> U
+ :<memsp3>:v<id3>:<virtaddr3> <size3> <fileoffset3> V
+ :<memsp4>:v<id4>:<virtaddr4> <size4> <fileoffset4> A
+ <pixfmt> <width> <height> <stride> <addrmode>
+ */
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (V)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane0MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane0FileOffset, // fileoffset
+
+ // Plane 1 (U)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ // Plane 2 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane2FileOffset, // fileoffset
+
+ // Plane 3 (A)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane3MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane3FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ default: // Single plane formats
+ {
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+ }
+
+ return PVRSRV_OK;
+}
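+/* Illustrative plane arithmetic (not part of the original source): for the
+ * two-plane YUV420 case above with an assumed stride of 0x500 (1280) bytes
+ * and height 0x2D0 (720) lines,
+ *     Plane0 (Y)  size   = 0x500 * 0x2D0 = 0xE1000 bytes
+ *     Plane1 (UV) size   = 0xE1000 >> 1  = 0x70800 bytes
+ *     Plane1 mem offset  = 0xE1000 (immediately after the Y plane)
+ *     Plane1 file offset = ui32FileOffset + 0xE1000
+ * which is what the emitted SII command line encodes.
+ */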
+
+/*!
+******************************************************************************
+
+ @Function PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input pszPDumpRegName
+ @Input pszFileName
+ @Input ui32FileOffset
+ @Input ui32Address
+ @Input ui32Size
+ @Input ui32PDumpFlags
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:0x%08X 0x%08X %s",
+ pszPDumpRegName,
+ ui32Address,
+ ui32FileOffset,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name PDumpRegRead32
+ @brief Dump 32-bit register read to script
+ @param pszPDumpRegName - pdump register bank name
+ @param ui32RegOffset - register offset
+ @param ui32Flags - pdump flags
+ @return Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+ pszPDumpRegName,
+ ui32RegOffset);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name PDumpRegRead64
+ @brief Dump 64-bit register read to script
+ @param pszPDumpRegName - pdump register bank name
+ @param ui32RegOffset - register offset
+ @param ui32Flags - pdump flags
+ @return Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X",
+ pszPDumpRegName,
+ ui32RegOffset);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
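+/* Illustrative example (not part of the original source): with the assumed
+ * bank name "RGXREG",
+ *     PDumpRegRead32("RGXREG", 0x48, ui32Flags);  emits  RDW :RGXREG:0x48
+ *     PDumpRegRead64("RGXREG", 0x48, ui32Flags);  emits  RDW64 :RGXREG:0x48
+ */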
+
+
+/*****************************************************************************
+ FUNCTION : PDumpWriteShiftedMaskedValue
+
+ PURPOSE : Emits the PDump commands for writing a masked shifted address
+ into another location
+
+ PARAMETERS : PDump symbolic name and offset of target word
+ PDump symbolic name and offset of source address
+ right shift amount
+ left shift amount
+ mask
+
+ RETURNS : PVRSRV_ERROR
+*****************************************************************************/
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+ const IMG_CHAR *pszDestSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefRegspaceName,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ IMG_UINT32 uiSHRAmount,
+ IMG_UINT32 uiSHLAmount,
+ IMG_UINT32 uiMask,
+ IMG_DEVMEM_SIZE_T uiWordSize,
+ IMG_UINT32 uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */
+ const IMG_CHAR *pszWrwSuffix;
+
+ /* Internal PDump register used for interim calculation */
+ const IMG_CHAR *pszPDumpIntRegSpace;
+ IMG_UINT32 uiPDumpIntRegNum;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if ((uiWordSize != 4) && (uiWordSize != 8))
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ pszWrwSuffix = (uiWordSize == 8) ? "64" : "";
+
+ /* Should really "Acquire" a pdump register here */
+ pszPDumpIntRegSpace = pszDestRegspaceName;
+ uiPDumpIntRegNum = 1;
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ /* Should this be "MOV" instead? */
+ "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src */
+ pszRefRegspaceName,
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ if (uiSHRAmount > 0)
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiSHRAmount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+ if (uiSHLAmount > 0)
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiSHLAmount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+ if (uiMask != (1ULL << (8*uiWordSize))-1)
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "AND :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiMask);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n",
+ pszWrwSuffix,
+ /* dest */
+ pszDestRegspaceName,
+ pszDestSymbolicName,
+ uiDestOffset,
+ /* src */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum);
+ if(eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eError;
+}
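+/* Illustrative script sequence (not part of the original source): with
+ * placeholder spaces/symbols DEST/DSYM and REF/RSYM, uiSHRAmount = 12,
+ * uiSHLAmount = 0, uiMask = 0xFFFFF and uiWordSize = 4, the function above
+ * emits
+ *     WRW :DEST:$1 :REF:RSYM:<uiRefOffset>
+ *     SHR :DEST:$1 :DEST:$1 0xC
+ *     AND :DEST:$1 :DEST:$1 0xFFFFF
+ *     WRW :DEST:DSYM:<uiDestOffset> :DEST:$1
+ * i.e. dest = ((&ref) >> 12) & 0xFFFFF, using internal register $1 as the
+ * scratch variable. The SHL line is skipped because uiSHLAmount is 0.
+ */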
+
+
+PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ const IMG_CHAR *pszPDumpDevName,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ IMG_UINT32 uiPDumpFlags)
+{
+ const IMG_CHAR *pszWrwSuffix = "";
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (ui32WordSize == 8)
+ {
+ pszWrwSuffix = "64";
+ }
+
+ PDUMP_LOCK();
+
+ if (ui32AlignShift != ui32Shift)
+ {
+ /* Write physical address into a variable */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* apply address alignment */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui32AlignShift);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* apply address shift */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui32Shift);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+
+ /* write result to register */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:0x%08X :%s:$1",
+ pszWrwSuffix,
+ pszDestSpaceName,
+ (IMG_UINT32)uiDestOffset,
+ pszPDumpDevName);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+ else
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ pszWrwSuffix,
+ /* dest */
+ pszDestSpaceName,
+ uiDestOffset,
+ /* src */
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+symbAddress_error:
+
+ PDUMP_UNLOCK();
+
+ return eError;
+}
+
+/**************************************************************************
+ * Function Name : PDumpIDLWithFlags
+ * Inputs : Idle time in clocks
+ * Outputs : None
+ * Returns : Error
+ * Description : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpIDLWithFlags"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpIDL
+ * Inputs : Idle time in clocks
+ * Outputs : None
+ * Returns : Error
+ * Description : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
+{
+ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/*****************************************************************************
+ FUNCTION : PDumpRegBasedCBP
+
+ PURPOSE : Dump CBP command to script
+
+ PARAMETERS : pszPDumpRegName, ui32RegOffset, ui32WPosVal,
+ ui32PacketSize, ui32BufferSize, ui32Flags
+
+ RETURNS : PVRSRV_ERROR
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X",
+ pszPDumpRegName,
+ ui32RegOffset,
+ ui32WPosVal,
+ ui32PacketSize,
+ ui32BufferSize);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
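+/* Illustrative example (not part of the original source): with the assumed
+ * bank name "RGXREG", a call
+ *     PDumpRegBasedCBP("RGXREG", 0x100, 0x80, 0x20, 0x1000, ui32Flags);
+ * emits
+ *     CBP :RGXREG:0x00000100 0x00000080 0x00000020 0x00001000
+ * i.e. a circular-buffer space check on the register at offset 0x100 for a
+ * 0x20-byte packet at write position 0x80 in a 0x1000-byte buffer.
+ */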
+
+PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUCtxID,
+ IMG_UINT32 ui32RegionID,
+ IMG_BOOL bEnable,
+ IMG_UINT64 ui64VAddr,
+ IMG_UINT64 ui64LenBytes,
+ IMG_UINT32 ui32XStride,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ if(bEnable)
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "TRG :%s:v%u %u 0x%08llX 0x%08llX %u",
+ pszMemSpace, ui32MMUCtxID, ui32RegionID,
+ ui64VAddr, ui64LenBytes, ui32XStride);
+ }
+ else
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "TRG :%s:v%u %u",
+ pszMemSpace, ui32MMUCtxID, ui32RegionID);
+
+ }
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
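+/* Illustrative example (not part of the original source): with the assumed
+ * memory space name "SYSMEM", enabling region 1 of MMU context 0 emits a
+ * line of the form
+ *     TRG :SYSMEM:v0 1 0x10000000 0x00100000 0
+ * (virtual address, length in bytes, X stride), while bEnable == IMG_FALSE
+ * emits the shorter disable form
+ *     TRG :SYSMEM:v0 1
+ */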
+
+/**************************************************************************
+ * Function Name : PDumpConnectionNotify
+ * Description : Called by the srvcore to tell PDump core that the
+ * PDump capture and control client has connected
+ **************************************************************************/
+void PDumpConnectionNotify(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psThis;
+
+ /* Give PDump control a chance to end the init phase, depends on OS */
+ if (!PDumpCtrlInitPhaseComplete())
+ {
+ PDumpStopInitPhase(IMG_TRUE, IMG_FALSE);
+ }
+
+ g_ConnectionCount++;
+ PVR_LOG(("PDump has connected (%u)", g_ConnectionCount));
+
+ /* Reset the parameter file attributes */
+ g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init;
+ g_PDumpParameters.ui32FileIdx = 0;
+
+ /* Loop over all known devices */
+ psThis = psPVRSRVData->psDeviceNodeList;
+ while (psThis)
+ {
+ if (psThis->pfnPDumpInitDevice)
+ {
+ /* Reset pdump according to connected device */
+ psThis->pfnPDumpInitDevice(psThis);
+ }
+ psThis = psThis->psNext;
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpDisconnectionNotify
+ * Description : Called by the connection_server to tell PDump core that
+ * the PDump capture and control client has disconnected
+ **************************************************************************/
+void PDumpDisconnectionNotify(void)
+{
+ PVRSRV_ERROR eErr;
+
+ if (PDumpCtrlCaptureOn())
+ {
+ PVR_LOG(("PDump killed, output files may be invalid or incomplete!"));
+
+ /* Disable capture in server, in case PDump client was killed and did
+ * not get a chance to reset the capture parameters.
+ */
+ eErr = PDumpSetDefaultCaptureParamsKM( DEBUG_CAPMODE_FRAMED,
+ FRAME_UNSET, FRAME_UNSET, 1, 0);
+ PVR_LOG_IF_ERROR(eErr, "PVRSRVPDumpSetDefaultCaptureParams");
+ }
+ else
+ {
+ PVR_LOG(("PDump disconnected"));
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpIfKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns : None
+ * Description : Create a PDUMP string which represents an IF command
+ * with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpIfKM"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpElseKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns : None
+ * Description : Create a PDUMP string which represents an ELSE command
+ * with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpElseKM"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpFiKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns : None
+ * Description : Create a PDUMP string which represents an FI command
+ * with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpFiKM"));
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
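+/* Illustrative script structure (not part of the original source): with a
+ * hypothetical condition name "CHECK_FW", PDumpIfKM/PDumpElseKM/PDumpFiKM
+ * produce a guarded block of the form
+ *     IF CHECK_FW
+ *     ...commands played when the condition holds...
+ *     ELSE CHECK_FW
+ *     ...commands played otherwise...
+ *     FI CHECK_FW
+ * all written with PDUMP_FLAGS_CONTINUOUS.
+ */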
+
+PVRSRV_ERROR PDumpCreateLockKM(void)
+{
+ return PDumpOSCreateLock();
+}
+
+void PDumpDestroyLockKM(void)
+{
+ PDumpOSDestroyLock();
+}
+
+void PDumpLock(void)
+{
+ PDumpOSLock();
+}
+
+void PDumpUnlock(void)
+{
+ PDumpOSUnlock();
+}
+
+#if defined(PVR_TESTING_UTILS)
+extern void PDumpOSDumpState(void);
+
+#if !defined(LINUX)
+void PDumpOSDumpState(void)
+{
+}
+#endif
+
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState)
+{
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpInitialised( %d )",
+ g_PDumpInitialised) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.hInit( %p ) g_PDumpScript.sCh.hMain( %p ) g_PDumpScript.sCh.hDeinit( %p )",
+ g_PDumpScript.sCh.hInit, g_PDumpScript.sCh.hMain, g_PDumpScript.sCh.hDeinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.hInit( %p ) g_PDumpParameters.sCh.hMain( %p ) g_PDumpParameters.sCh.hDeinit( %p )",
+ g_PDumpParameters.sCh.hInit, g_PDumpParameters.sCh.hMain, g_PDumpParameters.sCh.hDeinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.ui32Init( %d ) g_PDumpParameters.sWOff.ui32Main( %d ) g_PDumpParameters.sWOff.ui32Deinit( %d )",
+ g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )",
+ g_PDumpParameters.ui32FileIdx) );
+
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) bInitPhaseActive( %d ) ui32Flags( %x )",
+ &g_PDumpCtrl, g_PDumpCtrl.bInitPhaseActive, g_PDumpCtrl.ui32Flags) );
+ PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
+ g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame) );
+ PVR_LOG(("--- PDUMP COMMON: sCaptureRange.ui32Start( %x ) sCaptureRange.ui32End( %x ) sCaptureRange.ui32Interval( %u )",
+ g_PDumpCtrl.sCaptureRange.ui32Start, g_PDumpCtrl.sCaptureRange.ui32End, g_PDumpCtrl.sCaptureRange.ui32Interval) );
+ PVR_LOG(("--- PDUMP COMMON: bCaptureOn( %d ) bSuspended( %d ) bInPowerTransition( %d )",
+ g_PDumpCtrl.bCaptureOn, g_PDumpCtrl.bSuspended, g_PDumpCtrl.bInPowerTransition) );
+
+ if (bDumpOSLayerState)
+ {
+ PDumpOSDumpState();
+ }
+}
+#endif
+
+
+PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsPDumpConnectionData != NULL);
+
+ psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
+ if (psPDumpConnectionData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = OSLockCreate(&psPDumpConnectionData->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lockcreate;
+ }
+
+ dllist_init(&psPDumpConnectionData->sListHead);
+ psPDumpConnectionData->ui32RefCount = 1;
+ psPDumpConnectionData->bLastInto = IMG_FALSE;
+ psPDumpConnectionData->ui32LastSetFrameNumber = FRAME_UNSET;
+ psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+
+ /*
+ * Although we don't take a ref count here, handle base destruction
+ * will ensure that any resource that might trigger us to do a
+ * Transition will have been freed before the sync blocks which
+ * are keeping the sync connection data alive.
+ */
+ psPDumpConnectionData->psSyncConnectionData = psSyncConnectionData;
+ *ppsPDumpConnectionData = psPDumpConnectionData;
+
+ return PVRSRV_OK;
+
+fail_lockcreate:
+ OSFreeMem(psPDumpConnectionData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ _PDumpConnectionRelease(psPDumpConnectionData);
+}
+
+
+
+#else /* defined(PDUMP) */
+/* disable warning about empty module */
+#ifdef _WIN32
+#pragma warning (disable:4206)
+#endif
+#endif /* defined(PDUMP) */
+/*****************************************************************************
+ End of file (pdump_common.c)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title pdump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for pdump functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PDUMP_KM_H_
+#define _PDUMP_KM_H_
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+#include "sync_server.h"
+/*
+ * Pull in pdump flags from services include
+ */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE(a) if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a)))
+#define PDUMP_HEREA(a) PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a)))
+#else
+#define PDUMP_HERE(a) (void)(a);
+#define PDUMP_HEREA(a) (void)(a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+extern IMG_UINT32 g_ui32EveryLineCounter;
+#endif
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+#ifdef PDUMP
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) do \
+ { PVRSRV_ERROR _eE;\
+ _eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __FUNCTION__, __LINE__); \
+ PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) do \
+ { (void) PDumpCaptureError((_err), (_msg), __FUNCTION__, __LINE__);\
+ MSC_SUPPRESS_4127\
+ } while (0)
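+/* Illustrative usage (not part of the original source): inside server code,
+ *     PDUMP_ERROR(eError, "Failed to populate sync block");
+ * records the error in the script stream via PDumpCaptureError(), and
+ *     PDUMP_PANIC(SOME_CONDITION, "Invalid allocation for MLIST");
+ * expands the hypothetical id SOME_CONDITION to RGX_PDUMP_PANIC_SOME_CONDITION
+ * and records a panic via PDumpPanic(); both capture __FUNCTION__/__LINE__.
+ */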
+
+ /* Shared across pdump_x files */
+ PVRSRV_ERROR PDumpInitCommon(void);
+ void PDumpDeInitCommon(void);
+ IMG_BOOL PDumpReady(void);
+ void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+ size_t *puiZeroPageSize,
+ const IMG_CHAR **ppszZeroPageFilename);
+
+ void PDumpConnectionNotify(void);
+ void PDumpDisconnectionNotify(void);
+
+ void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+ PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Frame);
+ PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32* pui32Frame);
+ PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize);
+
+
+ PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegDst,
+ IMG_UINT32 ui32RegSrc,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToInternalVar(IMG_CHAR *pszInternalVar,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpWriteVarORValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName,
+ IMG_UINT32 ui32HPOffsetBytes,
+ IMG_UINT32 ui32NumSaveBytes,
+ IMG_CHAR *pszOutfileName,
+ IMG_UINT32 ui32OutfileOffsetByte,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+ PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
+ IMG_CHAR* pszFormat,
+ ...) __printf(2, 3);
+
+ PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags,
+ const IMG_CHAR * pszFormat,
+ va_list args);
+
+ PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline);
+
+ PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline);
+
+ PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32dwData,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame);
+
+ PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing);
+
+ PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+ PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUCtxID,
+ IMG_UINT32 ui32RegionID,
+ IMG_BOOL bEnable,
+ IMG_UINT64 ui64VAddr,
+ IMG_UINT64 ui64LenBytes,
+ IMG_UINT32 ui32XStride,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpCreateLockKM(void);
+ void PDumpDestroyLockKM(void);
+ void PDumpLock(void);
+ void PDumpUnlock(void);
+
+ PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond);
+ PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond);
+ PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond);
+
+ void PDumpPowerTransitionStart(void);
+ void PDumpPowerTransitionEnd(void);
+ IMG_BOOL PDumpInPowerTransition(void);
+ IMG_BOOL PDumpIsDumpSuspended(void);
+
+ /*!
+ * @name PDumpWriteParameter
+ * @brief General function for writing to PDump stream. Used
+ * mainly for memory dumps to parameter stream.
+ * Usually more convenient to use PDumpWriteScript below
+ * for the script stream.
+ * @param psui8Data - data to write
+ * @param ui32Size - size of write
+ * @param ui32Flags - PDump flags
+ * @param pui32FileOffset - on return contains the file offset to
+ * the start of the parameter data
+ * @param aszFilenameStr - pointer to at least a 20 char buffer to
+ * return the parameter filename
+ * @return error
+ */
+ PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+ IMG_CHAR* aszFilenameStr);
+
+ /*!
+ * @name PDumpWriteScript
+ * @brief Write an PDumpOS created string to the "script" output stream
+ * @param hString - PDump OS layer handle of string buffer to write
+ * @param ui32Flags - PDump flags
+ * @return IMG_TRUE on success.
+ */
+ IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+ /*
+ PDumpWriteShiftedMaskedValue():
+
+ loads the "reference" address into an internal PDump register,
+ optionally shifts it right,
+ optionally shifts it left,
+ optionally masks it
+ then finally writes the computed value to the given destination address
+
+ i.e. it emits pdump language equivalent to this expression:
+
+ dest = ((&ref) >> SHRamount << SHLamount) & MASK
+ */
+extern PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+ const IMG_CHAR *pszDestSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefRegspaceName,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ IMG_UINT32 uiSHRAmount,
+ IMG_UINT32 uiSHLAmount,
+ IMG_UINT32 uiMask,
+ IMG_DEVMEM_SIZE_T uiWordSize,
+ IMG_UINT32 uiPDumpFlags);
+
+ /*
+ PDumpWriteSymbAddress():
+
+ writes the address of the "reference" to the offset given
+ */
+extern PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ const IMG_CHAR *pszPDumpDevName,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+extern PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+extern void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle);
+
+/* Unregister notification of PDump Transition */
+extern void PDumpUnregisterTransitionCallback(void *pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+/* Wires-up a MIPS TLB in the page table*/
+extern PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32AllocationFlags,
+ IMG_UINT32 ui32Flags);
+
+/*Invalidate a MIPS TLB in the page table */
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32MipsTLBValidClearMask,
+ IMG_UINT32 ui32Flags);
+
+
+
+ #define PDUMP_LOCK PDumpLock
+ #define PDUMP_UNLOCK PDumpUnlock
+
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT PDumpDeInitCommon
+ #define PDUMPREG32 PDumpReg32
+ #define PDUMPREG64 PDumpReg64
+ #define PDUMPREGREAD32 PDumpRegRead32
+ #define PDUMPREGREAD64 PDumpRegRead64
+ #define PDUMPCOMMENT(...) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
+ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
+ #define PDUMPREGPOL PDumpRegPolKM
+ #define PDUMPPDREG PDumpPDReg
+ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
+ #define PDUMPREGBASEDCBP PDumpRegBasedCBP
+ #define PDUMPENDINITPHASE PDumpStopInitPhase
+ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
+ #define PDUMPIDL PDumpIDL
+ #define PDUMPPOWCMDSTART PDumpPowerTransitionStart
+ #define PDUMPPOWCMDEND PDumpPowerTransitionEnd
+ #define PDUMPPOWCMDINTRANS PDumpInPowerTransition
+ #define PDUMPIF PDumpIfKM
+ #define PDUMPELSE PDumpElseKM
+ #define PDUMPFI PDumpFiKM
+#else
+ /*
+ We should be clearer about which functions can be called
+ across the bridge as this looks rather unbalanced
+ */
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) ((void)0);
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) ((void)0);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(void)
+{
+ return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(void)
+{
+ return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCreateLockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCreateLockKM(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDestroyLockKM)
+#endif
+static INLINE void
+PDumpDestroyLockKM(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+ PVR_UNREFERENCED_PARAMETER(bPDumpClient);
+ PVR_UNREFERENCED_PARAMETER(bInitClient);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32Frame)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(ui32Frame);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32* pui32Frame)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pui32Frame);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVR_UNREFERENCED_PARAMETER(pszComment);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Mode);
+ PVR_UNREFERENCED_PARAMETER(ui32Start);
+ PVR_UNREFERENCED_PARAMETER(ui32End);
+ PVR_UNREFERENCED_PARAMETER(ui32Interval);
+ PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+ PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+ PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+ PVR_UNREFERENCED_PARAMETER(ui32PPline);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+ PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+ PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+ PVR_UNREFERENCED_PARAMETER(ui32PPline);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+ *pbIsLastCaptureFrame = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+ *bIsCapturing = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Width);
+ PVR_UNREFERENCED_PARAMETER(ui32Height);
+ PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+ PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+ PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncConnectionData);
+ PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+ PVR_UNREFERENCED_PARAMETER(pfnCallback);
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+ PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+ PVR_UNREFERENCED_PARAMETER(bInto);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+ #if defined WIN32
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT(...) / ## * PDUMPDEINIT(__VA_ARGS__) * ## /
+ #define PDUMPREG32(...) / ## * PDUMPREG32(__VA_ARGS__) * ## /
+ #define PDUMPREG64(...) / ## * PDUMPREG64(__VA_ARGS__) * ## /
+ #define PDUMPREGREAD32(...) / ## * PDUMPREGREAD32(__VA_ARGS__) * ## /
+ #define PDUMPREGREAD64(...) / ## * PDUMPREGREAD64(__VA_ARGS__) * ## /
+ #define PDUMPCOMMENT(...) / ## * PDUMPCOMMENT(__VA_ARGS__) * ## /
+ #define PDUMPREGPOL(...) / ## * PDUMPREGPOL(__VA_ARGS__) * ## /
+ #define PDUMPPDREG(...) / ## * PDUMPPDREG(__VA_ARGS__) * ## /
+ #define PDUMPPDREGWITHFLAGS(...) / ## * PDUMPPDREGWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPSYNC(...) / ## * PDUMPSYNC(__VA_ARGS__) * ## /
+ #define PDUMPCOPYTOMEM(...) / ## * PDUMPCOPYTOMEM(__VA_ARGS__) * ## /
+ #define PDUMPWRITE(...) / ## * PDUMPWRITE(__VA_ARGS__) * ## /
+ #define PDUMPCBP(...) / ## * PDUMPCBP(__VA_ARGS__) * ## /
+ #define PDUMPREGBASEDCBP(...) / ## * PDUMPREGBASEDCBP(__VA_ARGS__) * ## /
+ #define PDUMPCOMMENTWITHFLAGS(...) / ## * PDUMPCOMMENTWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPMALLOCPAGESPHYS(...) / ## * PDUMPMALLOCPAGESPHYS(__VA_ARGS__) * ## /
+ #define PDUMPENDINITPHASE(...) / ## * PDUMPENDINITPHASE(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREG(...) / ## * PDUMPMSVDXREG(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREGWRITE(...) / ## * PDUMPMSVDXREGWRITE(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREGREAD(...) / ## * PDUMPMSVDXREGREAD(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXPOLEQ(...) / ## * PDUMPMSVDXPOLEQ(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXPOL(...) / ## * PDUMPMSVDXPOL(__VA_ARGS__) * ## /
+ #define PDUMPIDLWITHFLAGS(...) / ## * PDUMPIDLWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPIDL(...) / ## * PDUMPIDL(__VA_ARGS__) * ## /
+ #define PDUMPPOWCMDSTART(...) / ## * PDUMPPOWCMDSTART(__VA_ARGS__) * ## /
+ #define PDUMPPOWCMDEND(...) / ## * PDUMPPOWCMDEND(__VA_ARGS__) * ## /
+ #define PDUMP_LOCK / ## * PDUMP_LOCK(__VA_ARGS__) * ## /
+ #define PDUMP_UNLOCK / ## * PDUMP_UNLOCK(__VA_ARGS__) * ## /
+ #else
+ #if defined LINUX || defined GCC_IA32 || defined GCC_ARM || defined __QNXNTO__ || defined(INTEGRITY_OS)
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT(args...)
+ #define PDUMPREG32(args...)
+ #define PDUMPREG64(args...)
+ #define PDUMPREGREAD32(args...)
+ #define PDUMPREGREAD64(args...)
+ #define PDUMPCOMMENT(args...)
+ #define PDUMPREGPOL(args...)
+ #define PDUMPPDREG(args...)
+ #define PDUMPPDREGWITHFLAGS(args...)
+ #define PDUMPSYNC(args...)
+ #define PDUMPCOPYTOMEM(args...)
+ #define PDUMPWRITE(args...)
+ #define PDUMPREGBASEDCBP(args...)
+ #define PDUMPCOMMENTWITHFLAGS(args...)
+ #define PDUMPENDINITPHASE(args...)
+ #define PDUMPIDLWITHFLAGS(args...)
+ #define PDUMPIDL(args...)
+ #define PDUMPPOWCMDSTART(args...)
+ #define PDUMPPOWCMDEND(args...)
+ #define PDUMP_LOCK(args...)
+ #define PDUMP_UNLOCK(args...)
+
+ #else
+ #error Compiler not specified
+ #endif
+ #endif
+#endif
+
+
+#endif /* _PDUMP_KM_H_ */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title MMU PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common PDump (MMU specific) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined (PDUMP)
+
+#include "img_types.h"
+#include "pdump_mmu.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#define MAX_PDUMP_MMU_CONTEXTS (10)
+static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1;
+
+
+#define MMUPX_FMT(X) ((X<3) ? ((X<2) ? "MMUPT_\0" : "MMUPD_\0") : "MMUPC_\0")
+#define MIPSMMUPX_FMT(X) ((X<3) ? ((X<2) ? "MIPSMMUPT_\0" : "MIPSMMUPD_\0") : "MIPSMMUPC_\0")
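+
+/* Illustrative note (not part of the original sources): with the macros
+ * above, the symbolic-name prefix chosen for each MMU level works out as
+ *
+ *     MMUPX_FMT(1) -> "MMUPT_"    (page table)
+ *     MMUPX_FMT(2) -> "MMUPD_"    (page directory)
+ *     MMUPX_FMT(3) -> "MMUPC_"    (page catalogue)
+ *
+ * and MIPSMMUPX_FMT(n) likewise selects "MIPSMMUPT_"/"MIPSMMUPD_"/"MIPSMMUPC_".
+ */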
+
+
+/* Array used to look-up debug strings from MMU_LEVEL */
+static IMG_CHAR ai8MMULevelStringLookup[MMU_LEVEL_LAST][15] =
+ {
+ "MMU_LEVEL_0",
+ "PAGE_TABLE",
+ "PAGE_DIRECTORY",
+ "PAGE_CATALOGUE",
+ };
+
+static PVRSRV_ERROR
+_ContiguousPDumpBytes(const IMG_CHAR *pszSymbolicName,
+ IMG_UINT32 ui32SymAddrOffset,
+ IMG_BOOL bFlush,
+ IMG_UINT32 uiNumBytes,
+ void *pvBytes,
+ IMG_UINT32 ui32Flags)
+{
+ static const IMG_CHAR *pvBeyondLastPointer;
+ static const IMG_CHAR *pvBasePointer;
+ static IMG_UINT32 ui32BeyondLastOffset;
+ static IMG_UINT32 ui32BaseOffset;
+ static IMG_UINT32 uiAccumulatedBytes = 0;
+ IMG_UINT32 ui32ParamOutPos;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+ PVR_UNREFERENCED_PARAMETER(ui32MaxLenFileName);
+
+ /* Caller has PDUMP_LOCK */
+
+ if (!bFlush && uiAccumulatedBytes > 0)
+ {
+ /* do some tests for contiguity. If it fails, we flush anyway */
+
+ if (pvBeyondLastPointer != pvBytes ||
+ ui32SymAddrOffset != ui32BeyondLastOffset
+ /* NB: ought to check that symbolic name agrees too, but
+ we know this always to be the case in the current use-case */
+ )
+ {
+ bFlush = IMG_TRUE;
+ }
+ }
+
+ /* Flush if necessary */
+ if (bFlush && uiAccumulatedBytes > 0)
+ {
+ eErr = PDumpWriteParameter((IMG_UINT8 *)(uintptr_t)pvBasePointer,
+ uiAccumulatedBytes, ui32Flags,
+ &ui32ParamOutPos, pszFileName);
+ if (eErr == PVRSRV_OK)
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript,
+ "LDB %s:0x%X 0x%X 0x%X %s",
+ /* dest */
+ pszSymbolicName,
+ ui32BaseOffset,
+ /* size */
+ uiAccumulatedBytes,
+ /* file offset */
+ ui32ParamOutPos,
+ /* filename */
+ pszFileName);
+ PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrOut);
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ }
+ else if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut);
+ }
+ else
+ {
+ /* Writing to the parameter file was prevented under the current flags
+ * and driver state, so skip the script write and do not treat this
+ * as an error. */
+ eErr = PVRSRV_OK;
+ }
+
+ uiAccumulatedBytes = 0;
+ }
+
+
+ /* Initialise offsets and pointers if necessary */
+ if (uiAccumulatedBytes == 0)
+ {
+ ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset;
+ pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes;
+ }
+
+ /* Accumulate some bytes */
+ ui32BeyondLastOffset += uiNumBytes;
+ pvBeyondLastPointer += uiNumBytes;
+ uiAccumulatedBytes += uiNumBytes;
+
+ErrOut:
+ return eErr;
+}
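+
+/* Illustrative usage sketch (not part of the original sources): callers such
+ * as PDumpMMUDumpPxEntries() below feed each entry's raw bytes with
+ * bFlush = IMG_FALSE so that adjacent entries coalesce into one LDB, then
+ * force the final flush with bFlush = IMG_TRUE and zero bytes, e.g.
+ *
+ *     _ContiguousPDumpBytes(pszSym, uiOffset, IMG_FALSE,
+ *                           uiBytesPerEntry, pvRawBytes, ui32Flags);
+ *     ...
+ *     _ContiguousPDumpBytes(pszSym, uiOffset, IMG_TRUE, 0, NULL, ui32Flags);
+ *
+ * The PDump lock must be held around the whole sequence.
+ */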
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUMalloc
+ * Inputs : pszPDumpDevName, eMMULevel, psDevPAddr, ui32Size, ui32Align,
+ * eMMUType
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a MALLOC command for an MMU page table, directory or
+ * catalogue object to the PDump script stream, preceded by a
+ * human-readable comment.
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+ IMG_UINT64 ui64SymbolicAddr;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (eMMULevel >= MMU_LEVEL_LAST)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ /*
+ Write a comment to the PDump2 script streams indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X DevPAddr=0x%08llX",
+ pszPDumpDevName,
+ ai8MMULevelStringLookup[eMMULevel],
+ ui32Size,
+ ui32Align,
+ psDevPAddr->uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ construct the symbolic address
+ */
+ ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s%016llX 0x%X 0x%X",
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64SymbolicAddr,
+ ui32Size,
+ ui32Align
+ /* don't need this sDevPAddr.uiAddr*/);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
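+
+/* Illustrative example (not from the original sources; the "SYSMEM" device
+ * name and addresses are made up): for a 4kB page catalogue at device
+ * physical address 0x12345000, PDumpMMUMalloc() emits roughly
+ *
+ *     -- MALLOC :SYSMEM:PAGE_CATALOGUE Size=0x00001000 Alignment=0x00001000 DevPAddr=0x12345000
+ *     MALLOC :SYSMEM:MMUPC_0000000012345000 0x1000 0x1000
+ *
+ * i.e. a human-readable comment followed by the MALLOC command acted on by
+ * the PDump player.
+ */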
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFree
+ * Inputs : pszPDumpDevName, eMMULevel, psDevPAddr, eMMUType
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a FREE command for a previously dumped MMU page
+ * table, directory or catalogue object to the PDump script stream.
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT64 ui64SymbolicAddr;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (eMMULevel >= MMU_LEVEL_LAST)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDUMP2 script streams indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s",
+ pszPDumpDevName, ai8MMULevelStringLookup[eMMULevel]);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ construct the symbolic address
+ */
+ ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s%016llX",
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64SymbolicAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUMalloc2
+ * Inputs : pszPDumpDevName, pszTableType, pszSymbolicAddr, ui32Size,
+ * ui32Align
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a MALLOC command for an MMU object identified by a
+ * caller-supplied symbolic address (e.g. a data page).
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDump2 script streams indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X\n",
+ pszPDumpDevName,
+ pszTableType,
+ ui32Size,
+ ui32Align);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s 0x%X 0x%X\n",
+ pszPDumpDevName,
+ pszSymbolicAddr,
+ ui32Size,
+ ui32Align
+ /* don't need this sDevPAddr.uiAddr*/);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFree2
+ * Inputs : pszPDumpDevName, pszTableType, pszSymbolicAddr
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a FREE command for an MMU object identified by a
+ * caller-supplied symbolic address.
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDUMP2 script streams indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s\n",
+ pszPDumpDevName, pszTableType);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s\n",
+ pszPDumpDevName,
+ pszSymbolicAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpPTBaseObjectToMem64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a WRW64 command that writes the symbolic address of
+ * an MMU Px base object into the destination PMR (MIPS MMU device type).
+********************************************************************************************************/
+PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags,
+ MMU_LEVEL eMMULevel,
+ IMG_UINT64 ui64PxSymAddr)
+{
+
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:%s%016llX:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, pszPDumpDevName, MIPSMMUPX_FMT(eMMULevel), ui64PxSymAddr,
+ (IMG_UINT64)0);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUDumpPxEntries
+ * Inputs : eMMULevel, pszPDumpDevName, pvPxMem, sPxDevPAddr, uiFirstEntry,
+ * uiNumEntries, plus symbolic address and entry formatting parameters
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Dumps a range of page catalogue/directory/table entries.
+ * Valid entries are emitted as WRW/SHR/SHL/OR command sequences so the
+ * PDump player can patch in relocated addresses; invalid entries are
+ * coalesced and written verbatim with LDB.
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+ const IMG_CHAR *pszPDumpDevName,
+ void *pvPxMem,
+ IMG_DEV_PHYADDR sPxDevPAddr,
+ IMG_UINT32 uiFirstEntry,
+ IMG_UINT32 uiNumEntries,
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT64 uiSymbolicAddrOffset,
+ IMG_UINT32 uiBytesPerEntry,
+ IMG_UINT32 uiLog2Align,
+ IMG_UINT32 uiAddrShift,
+ IMG_UINT64 uiAddrMask,
+ IMG_UINT64 uiPxEProtMask,
+ IMG_UINT64 uiDataValidEnable,
+ IMG_UINT32 ui32Flags,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT64 ui64PxSymAddr;
+ IMG_UINT64 ui64PxEValueSymAddr;
+ IMG_UINT32 ui32SymAddrOffset = 0;
+ IMG_UINT32 *pui32PxMem;
+ IMG_UINT64 *pui64PxMem;
+ IMG_BOOL bPxEValid;
+ IMG_UINT32 uiPxEIdx;
+ IMG_INT32 iShiftAmount;
+ IMG_CHAR *pszWrwSuffix = NULL;
+ void *pvRawBytes = NULL;
+ IMG_CHAR aszPxSymbolicAddr[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_UINT64 ui64PxE64;
+ IMG_UINT64 ui64Protflags64;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ eErr = PVRSRV_OK;
+ goto ErrOut;
+ }
+
+ if (pvPxMem == NULL)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+
+ /*
+ create the symbolic address of the Px
+ */
+ ui64PxSymAddr = sPxDevPAddr.uiAddr;
+
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ OSSNPrintf(aszPxSymbolicAddr,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ ":%s:%s%016llX",
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr);
+
+ PDUMP_LOCK();
+
+ /*
+ traverse PxEs, dumping entries
+ */
+ for(uiPxEIdx = uiFirstEntry;
+ uiPxEIdx < uiFirstEntry + uiNumEntries;
+ uiPxEIdx++)
+ {
+ /* Calc the symbolic address offset of the PxE location
+ This is what we have to add to the table address to get to a certain entry */
+ ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry);
+
+ /* Calc the symbolic address of the PxE value and HW protflags */
+ /* just read it here */
+ switch(uiBytesPerEntry)
+ {
+ case 4:
+ {
+ pui32PxMem = pvPxMem;
+ ui64PxE64 = pui32PxMem[uiPxEIdx];
+ pszWrwSuffix = "";
+ pvRawBytes = &pui32PxMem[uiPxEIdx];
+ break;
+ }
+ case 8:
+ {
+ pui64PxMem = pvPxMem;
+ ui64PxE64 = pui64PxMem[uiPxEIdx];
+ pszWrwSuffix = "64";
+ pvRawBytes = &pui64PxMem[uiPxEIdx];
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PDumpMMUDumpPxEntries: unsupported uiBytesPerEntry (%u)",
+ uiBytesPerEntry));
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrUnlock;
+ }
+ }
+
+ ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align;
+ ui64Protflags64 = ui64PxE64 & uiPxEProtMask;
+ bPxEValid = (ui64Protflags64 & uiDataValidEnable) ? IMG_TRUE : IMG_FALSE;
+ if(bPxEValid)
+ {
+ _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+ 0, 0,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift);
+
+ /* First put the symbolic representation of the actual
+ address of the entry into a pdump internal register */
+ /* MOV seemed cleaner here, since (a) it's 64-bit; (b) the
+ target is not memory. However, MOV cannot do the
+ "reference" of the symbolic address. Apparently WRW is
+ correct. */
+
+ if (pszSymbolicAddr == NULL)
+ {
+ pszSymbolicAddr = "none";
+ }
+
+ if (eMMULevel == MMU_LEVEL_1)
+ {
+ if (iShiftAmount == 0)
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016llX:0x%08X :%s:%s:0x%llx | 0x%llX\n",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ /* ORing prot flags */
+ ui64Protflags64);
+ }
+ else
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:$1 :%s:%s:0x%llx\n",
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset);
+ }
+ }
+ else
+ {
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel - 1);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel - 1);
+ }
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:$1 :%s:%s%016llX:0x0",
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxEValueSymAddr);
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ }
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ /* Now shift it to the right place, if necessary: */
+ /* Now shift that value down, by the "Align shift"
+ amount, to get it into units (ought to assert that
+ we get an integer - i.e. we don't shift any bits
+ off the bottom, don't know how to do PDUMP
+ assertions yet) and then back up by the right
+ amount to get it into the position of the field.
+ This is optimised into a single shift right by the
+ difference between the two. */
+ if (iShiftAmount > 0)
+ {
+ /* Page X Address is specified in units larger
+ than the position in the PxE would suggest. */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ iShiftAmount);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ else if (iShiftAmount < 0)
+ {
+ /* Page X Address is specified in units smaller
+ than the position in the PxE would suggest. */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ -iShiftAmount);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ if (eMMULevel == MMU_LEVEL_1)
+ {
+ if( iShiftAmount != 0)
+ {
+ /* Now we can "or" in the protection flags */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR :%s:$1 :%s:$1 0x%llX",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui64Protflags64);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016llX:0x%08X :%s:$1 ",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ }
+ }
+ else
+ {
+ /* Now we can "or" in the protection flags */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR :%s:$1 :%s:$1 0x%llX",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui64Protflags64);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ /* Finally, we write the register into the actual PxE */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016llX:0x%08X :%s:$1",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+ else
+ {
+ /* If the entry was "invalid", simply write the actual
+ value found to the memory location */
+ eErr = _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_FALSE,
+ uiBytesPerEntry, pvRawBytes,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ }
+ }
+
+ /* flush out any partly accumulated stuff for LDB */
+ _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+ 0, 0,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
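+
+/* Illustrative example (not from the original sources; device name, addresses
+ * and field values are made up): for a valid 8-byte page directory entry with
+ * uiLog2Align = 12 and uiAddrShift = 4 (iShiftAmount = 8), the loop above
+ * emits a sequence along the lines of
+ *
+ *     WRW :SYSMEM:$1 :SYSMEM:MMUPT_0000000012346000:0x0
+ *     SHR :SYSMEM:$1 :SYSMEM:$1 0x8
+ *     OR :SYSMEM:$1 :SYSMEM:$1 0x5
+ *     WRW64 :SYSMEM:MMUPD_0000000012345000:0x00000008 :SYSMEM:$1
+ *
+ * while runs of invalid entries are coalesced into a single LDB by
+ * _ContiguousPDumpBytes().
+ */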
+
+
+/**************************************************************************
+ * Function Name : _PdumpAllocMMUContext
+ * Inputs : pui32MMUContextID
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : pdump util to allocate MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+ IMG_UINT32 i;
+
+ /* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
+ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+ {
+ if((guiPDumpMMUContextAvailabilityMask & (1U << i)))
+ {
+ /* mark in use */
+ guiPDumpMMUContextAvailabilityMask &= ~(1U << i);
+ *pui32MMUContextID = i;
+ return PVRSRV_OK;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name : _PdumpFreeMMUContext
+ * Inputs : ui32MMUContextID
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : pdump util to free MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+ {
+ /* free the id */
+ PVR_ASSERT (!(guiPDumpMMUContextAvailabilityMask & (1U << ui32MMUContextID)));
+ guiPDumpMMUContextAvailabilityMask |= (1U << ui32MMUContextID);
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUAllocMMUContext
+ * Inputs :
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Alloc MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_DEV_PHYADDR sPCDevPAddr,
+ PDUMP_MMU_TYPE eMMUType,
+ IMG_UINT32 *pui32MMUContextID)
+{
+ IMG_UINT64 ui64PCSymAddr;
+ IMG_CHAR *pszMMUPX;
+
+ IMG_UINT32 ui32MMUContextID;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpAllocMMUContext failed: %d",
+ __func__, eErr));
+ PVR_DBG_BREAK;
+ goto ErrOut;
+ }
+
+ /*
+ create the symbolic address of the PC
+ */
+ ui64PCSymAddr = sPCDevPAddr.uiAddr;
+
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(1);
+ /* Giving it a mock value until the Pdump player implements
+ the support for the MIPS microAptiv MMU*/
+ eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(3);
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d %d :%s:%s%016llX",
+ /* mmu context */
+ pszPDumpMemSpaceName,
+ ui32MMUContextID,
+ /* mmu type */
+ eMMUType,
+ /* PC base address */
+ pszPDumpMemSpaceName,
+ pszMMUPX,
+ ui64PCSymAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ PVR_DBG_BREAK;
+ goto ErrOut;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ /* return the MMU Context ID */
+ *pui32MMUContextID = ui32MMUContextID;
+
+ErrOut:
+ return eErr;
+}
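+
+/* Illustrative example (not from the original sources; the memory space name
+ * and address are made up): binding the first free PDump MMU context to a
+ * page catalogue at device physical address 0x12345000 produces a line like
+ *
+ *     MMU :SYSMEM:v0 3 :SYSMEM:MMUPC_0000000012345000
+ *
+ * where "v0" is the allocated context ID and the middle number is the
+ * numeric PDUMP_MMU_TYPE value passed in.
+ */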
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFreeMMUContext
+ * Inputs :
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Free MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_UINT32 ui32MMUContextID)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- Clear MMU Context for memory space %s", pszPDumpMemSpaceName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d",
+ pszPDumpMemSpaceName,
+ ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+ eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpFreeMMUContext failed: %d",
+ __func__, eErr));
+ goto ErrUnlock;
+ }
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUActivateCatalog
+ * Inputs : pszPDumpRegSpaceName, pszPDumpRegName, uiRegAddr,
+ * pszPDumpPCSymbolicName
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a WRW command that writes the page catalogue base
+ * address into the given device register.
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+ const IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 uiRegAddr,
+ const IMG_CHAR *pszPDumpPCSymbolicName)
+{
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "-- Write Page Catalogue Address to %s",
+ pszPDumpRegName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:0x%04X %s:0",
+ /* dest */
+ pszPDumpRegSpaceName,
+ uiRegAddr,
+ /* src */
+ pszPDumpPCSymbolicName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+ IMG_UINT32 uiPDumpMMUCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ /* Script format: "SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin" */
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ eErr = PVRSRV_OK;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:v%x:" IMG_DEV_VIRTADDR_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ "0x%x %s.bin\n",
+ pszPDumpMemNamespace,
+ uiPDumpMMUCtx,
+ sDevAddrStart.uiAddr,
+ uiSize,
+ uiFileOffset,
+ pszFilename);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
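+
+/* Illustrative example (not from the original sources; names and values are
+ * made up, and the exact field widths depend on IMG_DEV_VIRTADDR_FMTSPEC and
+ * IMG_DEVMEM_SIZE_FMTSPEC): saving 64kB of device-virtual memory from MMU
+ * context 0 to "dump.bin" emits something like
+ *
+ *     SAB :SYSMEM:v0:0x0000001000 0x10000 0x0 dump.bin
+ *
+ * i.e. memory space, MMU context, start virtual address, size, file offset
+ * and output file name.
+ */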
+
+/**************************************************************************
+ * Function Name : PdumpWireUpMipsTLB
+ * Description : Builds a MIPS MMU page table entry value in a PDump internal
+ * register from the source PMR's symbolic address and allocation flags,
+ * then writes it into the destination PMR.
+**************************************************************************/
+PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32AllocationFlags,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%llX", aszMemspaceNameSource,
+ aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x6", aszMemspaceNameSource,
+ aszMemspaceNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x03FFFFC0", aszMemspaceNameSource,
+ aszMemspaceNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "OR :%s:$1 :%s:$1 0x%X", aszMemspaceNameSource,
+ aszMemspaceNameSource, ui32AllocationFlags);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PdumpInvalidateMipsTLB
+ * Description : Marks a MIPS MMU page table entry in the destination PMR
+ * as invalid by applying the supplied valid-bit clear mask.
+**************************************************************************/
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32MipsTLBValidClearMask,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%llX", aszMemspaceNameDest,
+ aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x%X", aszMemspaceNameDest,
+ aszMemspaceNameDest, ui32MipsTLBValidClearMask);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+#endif /* #if defined (PDUMP) */
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title MMU PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declarations of common PDump (MMU specific) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+/*
+ PDUMP MMU attributes
+*/
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+ /* Per-Device Pdump attribs */
+
+ /*!< Pdump memory bank name */
+ IMG_CHAR *pszPDumpMemDevName;
+
+ /*!< Pdump register bank name */
+ IMG_CHAR *pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+ IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+ /* data page info */
+ IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+ /* FIXME: would these be better as pointers rather than copies? */
+ struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+ struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+ struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+ extern PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align,
+ PDUMP_MMU_TYPE eMMUType);
+
+ extern PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ PDUMP_MMU_TYPE eMMUType);
+
+ extern PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align);
+
+ extern PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr);
+
+
+ extern PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags,
+ MMU_LEVEL eMMULevel,
+ IMG_UINT64 ui64PxSymAddr);
+
+ extern PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+ const IMG_CHAR *pszPDumpDevName,
+ void *pvPxMem,
+ IMG_DEV_PHYADDR sPxDevPAddr,
+ IMG_UINT32 uiFirstEntry,
+ IMG_UINT32 uiNumEntries,
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT64 uiSymbolicAddrOffset,
+ IMG_UINT32 uiBytesPerEntry,
+ IMG_UINT32 uiLog2Align,
+ IMG_UINT32 uiAddrShift,
+ IMG_UINT64 uiAddrMask,
+ IMG_UINT64 uiPxEProtMask,
+ IMG_UINT64 uiDataValidEnable,
+ IMG_UINT32 ui32Flags,
+ PDUMP_MMU_TYPE eMMUType);
+
+
+ extern PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_DEV_PHYADDR sPCDevPAddr,
+ PDUMP_MMU_TYPE eMMUType,
+ IMG_UINT32 *pui32MMUContextID);
+
+ extern PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_UINT32 ui32MMUContextID);
+
+ extern PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+ const IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 uiRegAddr,
+ const IMG_CHAR *pszPDumpPCSymbolicName);
+
+ /* FIXME: split to separate file... (debatable whether this is anything to do with MMU) */
+extern PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+ IMG_UINT32 uiPDumpMMUCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+ #define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, aszSymbolicAddr, ui32Size, ui32Align) \
+ PDumpMMUMalloc2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr, ui32Size, ui32Align)
+ #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, aszSymbolicAddr) \
+ PDumpMMUFree2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr)
+
+ #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+ PDumpMMUAllocMMUContext(pszPDumpMemDevName, \
+ sPCDevPAddr, \
+ eMMUType, \
+ puiPDumpCtxID)
+
+ #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+ PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID)
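+
+ /* Illustrative usage sketch (not from the original sources; the device
+ * name and symbolic address are made up): a caller dumps a data page as
+ *
+ *     PDUMP_MMU_MALLOC_DP("SYSMEM", ":SYSMEM:DP_0000000012345000",
+ *                         0x1000, 0x1000);
+ *     ...
+ *     PDUMP_MMU_FREE_DP("SYSMEM", ":SYSMEM:DP_0000000012345000");
+ *
+ * With PDUMP undefined these macros compile away to ((void)0), so no
+ * conditional compilation is needed at the call site.
+ */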
+#else
+
+ #define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, pszDevPAddr, ui32Size, ui32Align) \
+ ((void)0)
+ #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, psDevPAddr) \
+ ((void)0)
+ #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+ ((void)0)
+ #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+ ((void)0)
+
+#endif /* defined(PDUMP) */
+
+#endif /* SRVKM_PDUMP_MMU_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS-independent interface to helper functions for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device_types.h"
+
+
+/* FIXME
+ * Some OSes (WinXP,CE) allocate the string on the stack, but some
+ * (Linux) use a global variable/lock instead.
+ * Would be good to use the same across all OSes.
+ *
+ * A handle is returned which represents IMG_CHAR* type on all OSes.
+ *
+ * The allocated buffer length is also returned on OSes where it's
+ * supported (e.g. Linux).
+ */
+#define MAX_PDUMP_STRING_LENGTH (256)
+#if defined(WIN32)
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#else /* WIN32 */
+
+#if defined(__QNXNTO__)
+
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#else /* __QNXNTO__ */
+
+ /*
+ * Linux
+ */
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR *pszMsg; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetMessageString");
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_CHAR *pszFileName; \
+ IMG_UINT32 ui32MaxLenScript; \
+ IMG_UINT32 ui32MaxLenFileName; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");\
+ eErrorPDump = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetFilenameString");
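+
+/* Illustrative usage sketch (not from the original sources): PDump helpers
+ * throughout this patch follow the same pattern with these macros, e.g.
+ *
+ *     PVRSRV_ERROR eErr;
+ *     PDUMP_GET_SCRIPT_STRING();
+ *     PDUMP_LOCK();
+ *     eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- example comment");
+ *     if (eErr == PVRSRV_OK)
+ *             PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ *     PDUMP_UNLOCK();
+ *
+ * On Linux the macro fetches a shared, lock-protected buffer; on WIN32 and
+ * QNX it declares the buffer on the stack, as noted in the FIXME above.
+ */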
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetScriptString
+ @Description Get the handle of the PDump "script" buffer.
+ This function is only called if PDUMP is defined.
+ @Output phScript Handle of the PDump script buffer
+ @Output pui32MaxLen max length the script buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetMessageString
+ @Description Get the PDump "message" buffer.
+ This function is only called if PDUMP is defined.
+ @Output ppszMsg Pointer to the PDump message buffer
+ @Output pui32MaxLen max length the message buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen);
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetFilenameString
+ @Description Get the PDump "filename" buffer.
+ This function is only called if PDUMP is defined.
+ @Output ppszFile Pointer to the PDump filename buffer
+ @Output pui32MaxLen max length the filename buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
+
+#endif /* __QNXNTO__ */
+#endif /* WIN32 */
+
+
+/*
+ * PDump streams, channels, init and deinit routines (common to all OSes)
+ */
+
+typedef struct
+{
+ IMG_HANDLE hInit; /*!< Driver initialisation PDump stream */
+ IMG_HANDLE hMain; /*!< App framed PDump stream */
+ IMG_HANDLE hDeinit; /*!< Driver/HW de-initialisation PDump stream */
+} PDUMP_CHANNEL;
+
+/**************************************************************************/ /*!
+@Function PDumpOSInit
+@Description Reset the connection to vldbgdrv, then try to connect to
+ PDump streams. This function is only called if PDUMP is
+ defined.
+@Input psParam PDump channel to be used for logging
+ parameters
+@Input psScript PDump channel to be used for logging
+ commands / events
+@Output pui32InitCapMode The initial PDump capture mode.
+@Output ppszEnvComment Environment-specific comment that is
+ output when writing to the PDump
+ stream (this may be NULL).
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+ IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDeInit
+@Description Disconnect the PDump streams and close the connection to
+ vldbgdrv. This function is only called if PDUMP is defined.
+@Input psParam PDump parameter channel to be closed
+@Input psScript PDump command channel to be closed
+@Return None
+*/ /**************************************************************************/
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript);
+
+/**************************************************************************/ /*!
+@Function PDumpOSSetSplitMarker
+@Description Inform the PDump client to start a new file at the given
+ marker. This function is only called if PDUMP is defined.
+@Input hStream handle of PDump stream
+@Input ui32Marker byte file position
+@Return IMG_TRUE
+*/ /**************************************************************************/
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDebugDriverWrite
+@Description Writes a given number of bytes from the specified buffer
+ to a PDump stream. This function is only called if PDUMP
+ is defined.
+@Input psStream handle of PDump stream to write into
+@Input pui8Data buffer to write data from
+@Input ui32BCount number of bytes to write
+@Return The number of bytes actually written (may be less than
+ ui32BCount if there is insufficient space in the target
+ PDump stream buffer)
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSDebugDriverWrite(IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount);
+
+/*
+ * Define macro for processing variable args list in OS-independent
+ * manner. See e.g. PDumpCommentWithFlags().
+ */
+#define PDUMP_va_list va_list
+#define PDUMP_va_start va_start
+#define PDUMP_va_end va_end
+
+
+/**************************************************************************/ /*!
+@Function PDumpOSBufprintf
+@Description Printf to OS-specific PDump state buffer. This function is
+ only called if PDUMP is defined.
+@Input hBuf handle of buffer to write into
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDebugPrintf
+@Description Debug message during PDumping. This function is only called
+ if PDUMP is defined.
+@Input pszFormat format string
+@Return None
+*/ /**************************************************************************/
+void PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) __printf(1, 2);
+
+/*
+ * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap.
+ */
+/**************************************************************************/ /*!
+@Function PDumpOSSprintf
+@Description Printf to IMG char array. This function is only called if
+ PDUMP is defined.
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Output pszComment char array to print into
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function PDumpOSVSprintf
+@Description Printf to IMG string using variable args (see stdarg.h).
+ This is necessary because the '...' notation does not
+ support nested function calls.
+ This function is only called if PDUMP is defined.
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Input vaArgs variable args structure (from stdarg.h)
+@Output pszMsg char array to print into
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) __printf(3, 0);
+
+/**************************************************************************/ /*!
+@Function PDumpOSBuflen
+@Description Returns the length of the specified buffer (in chars).
+ This function is only called if PDUMP is defined.
+@Input hBuffer handle to buffer
+@Input ui32BufferSizeMax max size of buffer (chars)
+@Return The length of the buffer, will always be <= ui32BufferSizeMax
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function PDumpOSVerifyLineEnding
+@Description Put line ending sequence at the end if it isn't already
+ there. This function is only called if PDUMP is defined.
+@Input hBuffer handle to buffer
+@Input ui32BufferSizeMax max size of buffer (chars)
+@Return None
+*/ /**************************************************************************/
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function PDumpOSReleaseExecution
+@Description OS function to switch to another process, to clear PDump
+ buffers.
+ This function can simply wrap OSReleaseThreadQuanta.
+ This function is only called if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSReleaseExecution(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSCreateLock
+@Description Create the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSCreateLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDestroyLock
+@Description    Destroy the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSDestroyLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSLock
+@Description Acquire the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSUnlock
+@Description Release the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSUnlock(void);
+
+/*!
+ * @name PDumpOSGetCtrlState
+ * @brief Retrieve some state from the debug driver or debug driver stream
+ */
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream, IMG_UINT32 ui32StateID);
+
+/*!
+ * @name PDumpOSSetFrame
+ * @brief Set the current frame value mirrored in the debug driver
+ */
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame);
+
+/*!
+ * @name PDumpOSAllowInitPhaseToComplete
+ * @brief Some platforms wish to control when the init phase is marked as
+ *        complete, depending on which client is instructing it.
+ */
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Physmem PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common PDump (PMR specific) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#if defined(LINUX)
+#include <linux/ctype.h>
+#else
+#include <ctype.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump_physmem.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+
+/* #define MAX_PDUMP_MMU_CONTEXTS (10) */
+/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1; */
+
+
+struct _PDUMP_PHYSMEM_INFO_T_
+{
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Align;
+ IMG_UINT32 ui32SerialNum;
+};
+
+static IMG_BOOL _IsAllowedSym(IMG_CHAR sym)
+{
+	/* Letters, digits and '_' are allowed */
+ if (isalnum(sym) || sym == '_')
+ return IMG_TRUE;
+ else
+ return IMG_FALSE;
+}
+
+static IMG_BOOL _IsLowerCaseSym(IMG_CHAR sym)
+{
+ if (sym >= 'a' && sym <= 'z')
+ return IMG_TRUE;
+ else
+ return IMG_FALSE;
+}
+
+void PDumpMakeStringValid(IMG_CHAR *pszString,
+                          IMG_UINT32 ui32StrLen)
+{
+	IMG_UINT32 i;
+	for (i = 0; i < ui32StrLen; i++)
+	{
+		if (_IsAllowedSym(pszString[i]))
+		{
+			if (_IsLowerCaseSym(pszString[i]))
+			{
+				/* Convert to upper case */
+				pszString[i] = pszString[i] - ('a' - 'A');
+			}
+		}
+		else
+		{
+			pszString[i] = '_';
+		}
+	}
+}
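+
+/*
+ * Example (illustrative only): PDumpMakeStringValid() maps every character
+ * that is not alphanumeric or '_' to '_' and upper-cases the rest, so
+ *
+ *	IMG_CHAR szName[] = "pmr-12.buf";
+ *	PDumpMakeStringValid(szName, sizeof(szName) - 1);
+ *
+ * leaves szName containing "PMR_12_BUF".
+ */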
+
+/**************************************************************************
+ * Function Name  : PDumpMalloc
+ * Inputs         : pszDevSpace, pszSymbolicAddress, ui64Size, uiAlign,
+ *                  bInitialise, ui32InitValue, bForcePersistent
+ * Outputs        : phHandlePtr
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emits a MALLOC (or CALLOC when bInitialise is set) to
+ *                  the PDump script stream and records the allocation
+ *                  details against the returned handle.
+**************************************************************************/
+PVRSRV_ERROR PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phHandlePtr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+	psPDumpAllocationInfo = OSAllocMem(sizeof(*psPDumpAllocationInfo));
+ PVR_ASSERT(psPDumpAllocationInfo != NULL);
+
+ if (bForcePersistent)
+ {
+ ui32Flags |= PDUMP_FLAGS_PERSISTENT;
+ }
+
+ /*
+ construct the symbolic address
+ */
+
+	OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress,
+	           sizeof(psPDumpAllocationInfo->aszSymbolicAddress),
+	           ":%s:%s",
+	           pszDevSpace,
+	           pszSymbolicAddress);
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ PDUMP_LOCK();
+ if (bInitialise)
+ {
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "CALLOC %s 0x%llX 0x%llX 0x%X\n",
+ psPDumpAllocationInfo->aszSymbolicAddress,
+ ui64Size,
+ uiAlign,
+ ui32InitValue);
+ }
+ else
+ {
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC %s 0x%llX 0x%llX\n",
+ psPDumpAllocationInfo->aszSymbolicAddress,
+ ui64Size,
+ uiAlign);
+ }
+
+ if(eError != PVRSRV_OK)
+ {
+ OSFreeMem(psPDumpAllocationInfo);
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ psPDumpAllocationInfo->ui64Size = ui64Size;
+ psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
+
+ *phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo;
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
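+/*
+ * Example (illustrative): for a 4 KiB, 4 KiB-aligned, uninitialised
+ * allocation in device space "SYSMEM" with symbolic name "EXAMPLE",
+ * PDumpMalloc() emits a script line of the form
+ *
+ *	MALLOC :SYSMEM:EXAMPLE 0x1000 0x1000
+ *
+ * and, with bInitialise set and ui32InitValue 0,
+ *
+ *	CALLOC :SYSMEM:EXAMPLE 0x1000 0x1000 0x0
+ *
+ * The device space and symbolic name here are made-up placeholders.
+ */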
+
+/**************************************************************************
+ * Function Name  : PDumpFree
+ * Inputs         : hPDumpAllocationInfoHandle
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emits a FREE command to the PDump script stream and
+ *                  releases the allocation info recorded by PDumpMalloc().
+**************************************************************************/
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle;
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE %s\n",
+ psPDumpAllocationInfo->aszSymbolicAddress);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ OSFreeMem(psPDumpAllocationInfo);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ PMR_VALUE32_FMTSPEC " ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ ui32Value);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ PMR_VALUE64_FMTSPEC " ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ ui64Value);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ PDUMP_FILEOFFSET_FMTSPEC " %s\n",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ uiSize,
+ uiFileOffset,
+ pszFilename);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiPDumpFlags;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ uiPDumpFlags = 0;
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ "0x%08X %s.bin\n",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ uiSize,
+ uiFileOffset,
+ pszFileName);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 uiCount,
+ IMG_UINT32 uiDelay,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ "0x%08X 0x%08X %d %d %d\n",
+ pszMemspaceName,
+ pszSymbolicName,
+ uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ uiCount,
+ uiDelay);
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
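+
+/*
+ * Example (illustrative): a poll on offset 0x80 of :SYSMEM:EXAMPLE with
+ * ui32Value 0x1, ui32Mask 0xFFFFFFFF, operator PDUMP_POLL_OPERATOR_EQUAL,
+ * uiCount 10 and uiDelay 100 emits a line of the form
+ *
+ *	POL :SYSMEM:EXAMPLE:<offset> 0x00000001 0xFFFFFFFF 0 10 100
+ *
+ * where <offset> is rendered with IMG_DEVMEM_OFFSET_FMTSPEC and the third
+ * numeric field is the operator value. The memory space and symbolic name
+ * are placeholders.
+ */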
+
+PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PDUMP_FLAGS_T uiPDumpFlags = 0;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n",
+ pszMemspaceName,
+ pszSymbolicName,
+ uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+ size_t uiNumBytes,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_CHAR *pszFilenameOut,
+ size_t uiFilenameBufSz,
+ PDUMP_FILEOFFSET_T *puiOffsetOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz);
+
+ if (!PDumpReady())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ PVR_ASSERT(uiNumBytes > 0);
+
+ /* PRQA S 3415 1 */ /* side effects desired */
+ if (PDumpIsDumpSuspended())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+ }
+
+ PVR_ASSERT(uiFilenameBufSz <= PDUMP_PARAM_MAX_FILE_NAME);
+
+ PDUMP_LOCK();
+
+ eError = PDumpWriteParameter(pcBuffer, uiNumBytes, uiPDumpFlags, puiOffsetOut, pszFilenameOut);
+
+ PDUMP_UNLOCK();
+
+ if ((eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) && (eError != PVRSRV_OK))
+ {
+ PVR_LOGR_IF_ERROR(eError, "PDumpWriteParameter");
+ }
+	/* Otherwise the write to the parameter file either succeeded or was
+	 * prevented by the flags and the current state of the driver; skip
+	 * further writes and let the caller know.
+	 */
+ return eError;
+}
+
+#endif /* PDUMP */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title pdump functions to assist with physmem allocations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    PDump routines to assist with physmem allocations
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+
+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40
+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60
+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ /* alignment is alignment of start of buffer _and_
+ minimum contiguity - i.e. smallest allowable
+ page-size. */
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phHandlePtr);
+
+extern
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+
+IMG_INTERNAL void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+ IMG_UINT32 ui32StrLen);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpMalloc)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phHandlePtr)
+{
+ PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+ PVR_UNREFERENCED_PARAMETER(ui64Size);
+ PVR_UNREFERENCED_PARAMETER(uiAlign);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+ PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%llu_%llu_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s"
+
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+ PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+ PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+ ((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+ ((void)(0))
+#endif /* defined(PDUMP) */
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 uiFileOffset);
+
+/*
+ PDumpPMRPOL()
+
+	Emits a POL command to the PDump script stream.
+*/
+extern PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 uiCount,
+ IMG_UINT32 uiDelay,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteBuffer()
+ *
+ * Writes a binary blob containing the current contents of the memory to
+ * the PDump parameter stream, and returns the filename and offset at
+ * which that blob is located (for use in a subsequent LDB, for example).
+ *
+ * The caller provides the buffer that receives the filename, and declares
+ * the size of that buffer.
+ */
+extern PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+ size_t uiNumBytes,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_CHAR *pszFilenameOut,
+ size_t uiFilenameBufSz,
+ PDUMP_FILEOFFSET_T *puiOffsetOut);
+
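+/*
+ * Illustrative sketch (assumptions noted): dumping the current contents of a
+ * caller-owned buffer (pui8Data / uiNumBytes stand in for it) and arranging
+ * for it to be reloaded at playback time by pairing PDumpWriteBuffer() with
+ * PDumpPMRLDB(). The device space and symbolic name are placeholders.
+ *
+ *	IMG_CHAR szFile[PDUMP_PARAM_MAX_FILE_NAME];
+ *	PDUMP_FILEOFFSET_T uiOffset;
+ *	PVRSRV_ERROR eError;
+ *
+ *	eError = PDumpWriteBuffer(pui8Data, uiNumBytes, PDUMP_FLAGS_CONTINUOUS,
+ *	                          szFile, sizeof(szFile), &uiOffset);
+ *	if (eError == PVRSRV_OK)
+ *	{
+ *		eError = PDumpPMRLDB("SYSMEM", "EXAMPLE", 0, uiNumBytes,
+ *		                     szFile, uiOffset, PDUMP_FLAGS_CONTINUOUS);
+ *	}
+ */
+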
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Abstraction of PDUMP symbolic address derivation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Allows pdump functions to derive symbolic addresses on-the-fly
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
+
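+/*
+ * Minimal sketch of such a callback (hypothetical, for illustration only):
+ * it formats a fixed ":MEMSPACE:SYMBOL" style address and passes the offset
+ * straight through. A real implementation would derive the symbolic name
+ * and adjusted offset from the allocation referenced by hPriv.
+ *
+ *	static PVRSRV_ERROR _ExampleSymAddr(IMG_HANDLE hPriv,
+ *	                                    IMG_UINT32 uiOffset,
+ *	                                    IMG_CHAR *pszSymbolicAddr,
+ *	                                    IMG_UINT32 ui32SymbolicAddrLen,
+ *	                                    IMG_UINT32 *pui32NewOffset)
+ *	{
+ *		PVR_UNREFERENCED_PARAMETER(hPriv);
+ *		OSSNPrintf(pszSymbolicAddr, ui32SymbolicAddrLen, ":SYSMEM:EXAMPLE");
+ *		*pui32NewOffset = uiOffset;
+ *		return PVRSRV_OK;
+ *	}
+ */
+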
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PDUMP definitions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description PDUMP definitions header
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PDUMPDEFS_H__)
+#define __PDUMPDEFS_H__
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+// PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+ PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+
+ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT 0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK 0x000000FF
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1 << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT 20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK 0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE (0 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED (9 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT (3 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT (4 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT (5 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1 << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE (1 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED (2 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2 (3 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE (4 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE (5 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+
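+/*
+ * Example (illustrative): an address mode word is built by OR-ing one value
+ * from each field, e.g.
+ *
+ *	IMG_UINT32 ui32AddrMode = PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE |
+ *	                          PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT |
+ *	                          PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2;
+ *
+ * This shows how the fields combine; it is not a recommended configuration.
+ */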
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+ PDUMP_POLL_OPERATOR_EQUAL = 0,
+ PDUMP_POLL_OPERATOR_LESS = 1,
+ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+ PDUMP_POLL_OPERATOR_GREATER = 3,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75 /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 350 /*!< Max length of a pdump comment */
+
+/*!
+ PDump MMU type
+ (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+ PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1,
+ PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
+ PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3,
+ PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4,
+ PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5,
+ PDUMP_MMU_TYPE_VARPAGE_40BIT = 6,
+ PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7,
+ PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8,
+ PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9,
+ PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+#endif /* __PDUMPDEFS_H__ */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File physheap.c
+@Title Physical heap management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Management functions for the physical heap(s). A heap contains
+ all the information required by services when using memory from
+ that heap (such as CPU <> Device physical address translation).
+                A system must register at least one heap, but may register
+                more than one, which is why each heap is registered with a
+                (system-wide) unique ID.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+struct _PHYS_HEAP_
+{
+ /*! ID of this physical memory heap */
+ IMG_UINT32 ui32PhysHeapID;
+ /*! The type of this heap */
+ PHYS_HEAP_TYPE eType;
+
+ /*! PDump name of this physical memory heap */
+ IMG_CHAR *pszPDumpMemspaceName;
+ /*! Private data for the translate routines */
+ IMG_HANDLE hPrivData;
+ /*! Function callbacks */
+ PHYS_HEAP_FUNCTIONS *psMemFuncs;
+
+ /*! Array of sub-regions of the heap */
+ PHYS_HEAP_REGION *pasRegions;
+ IMG_UINT32 ui32NumOfRegions;
+
+ /*! Refcount */
+ IMG_UINT32 ui32RefCount;
+ /*! Pointer to next physical heap */
+ struct _PHYS_HEAP_ *psNext;
+};
+
+static PHYS_HEAP *g_psPhysHeapList;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \
+ PVRSRVDebugPrintf(PVR_DBG_WARNING, \
+ __FILE__, \
+ __LINE__, \
+ fmt, \
+ __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP **ppsPhysHeap)
+{
+ PHYS_HEAP *psNew;
+ PHYS_HEAP *psTmp;
+
+ PVR_DPF_ENTERED;
+
+ if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Check this heap ID isn't already in use */
+ psTmp = g_psPhysHeapList;
+ while (psTmp)
+ {
+ if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID)
+ {
+ return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE;
+ }
+ psTmp = psTmp->psNext;
+ }
+
+ psNew = OSAllocMem(sizeof(PHYS_HEAP));
+ if (psNew == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID;
+ psNew->eType = psConfig->eType;
+ psNew->psMemFuncs = psConfig->psMemFuncs;
+ psNew->hPrivData = psConfig->hPrivData;
+ psNew->ui32RefCount = 0;
+ psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+
+ psNew->pasRegions = psConfig->pasRegions;
+ psNew->ui32NumOfRegions = psConfig->ui32NumOfRegions;
+
+ psNew->psNext = g_psPhysHeapList;
+ g_psPhysHeapList = psNew;
+
+ *ppsPhysHeap = psNew;
+
+ PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap)
+{
+ PVR_DPF_ENTERED1(psPhysHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+ {
+ PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+ }
+
+ if (g_psPhysHeapList == psPhysHeap)
+ {
+ g_psPhysHeapList = psPhysHeap->psNext;
+ }
+ else
+ {
+ PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+ while(psTmp->psNext != psPhysHeap)
+ {
+ psTmp = psTmp->psNext;
+ }
+ psTmp->psNext = psPhysHeap->psNext;
+ }
+
+ OSFreeMem(psPhysHeap);
+
+ PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+ PHYS_HEAP **ppsPhysHeap)
+{
+ PHYS_HEAP *psTmp = g_psPhysHeapList;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_DPF_ENTERED1(ui32PhysHeapID);
+
+ while (psTmp)
+ {
+ if (psTmp->ui32PhysHeapID == ui32PhysHeapID)
+ {
+ break;
+ }
+ psTmp = psTmp->psNext;
+ }
+
+ if (psTmp == NULL)
+ {
+ eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+ }
+ else
+ {
+ psTmp->ui32RefCount++;
+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psTmp, psTmp->ui32RefCount);
+ }
+
+ *ppsPhysHeap = psTmp;
+ PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+ PVR_DPF_ENTERED1(psPhysHeap);
+
+ psPhysHeap->ui32RefCount--;
+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psPhysHeap, psPhysHeap->ui32RefCount);
+
+ PVR_DPF_RETURN;
+}
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->eType;
+}
+
+/*
+ * Returns in psDevPAddr the device physical base address that the system
+ * layer configured for the referenced region. Only the region index is
+ * validated; the address value itself is returned as-is.
+ */
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *psDevPAddr = psPhysHeap->pasRegions[ui32RegionId].sCardBase;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+/*
+ * Returns in psCpuPAddr the CPU physical start address that the system
+ * layer configured for the referenced region. Only the region index is
+ * validated; the address value itself is returned as-is.
+ */
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *psCpuPAddr = psPhysHeap->pasRegions[ui32RegionId].sStartAddr;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_UINT64 *puiSize)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *puiSize = psPhysHeap->pasRegions[ui32RegionId].uiSize;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+ ui32NumOfAddr,
+ psDevPAddr,
+ psCpuPAddr);
+}
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+ ui32NumOfAddr,
+ psCpuPAddr,
+ psDevPAddr);
+}
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+ if (psPhysHeap->psMemFuncs->pfnGetRegionId == NULL)
+ {
+ return 0;
+ }
+
+ return psPhysHeap->psMemFuncs->pfnGetRegionId(psPhysHeap->hPrivData,
+ uiAllocFlags);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapInit(void)
+{
+ g_psPhysHeapList = NULL;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapDeinit(void)
+{
+ PVR_ASSERT(g_psPhysHeapList == NULL);
+
+ return PVRSRV_OK;
+}
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->ui32NumOfRegions;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Physical heap management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the interface for the physical heap management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#ifndef _PHYSHEAP_H_
+#define _PHYSHEAP_H_
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+
+typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef void (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+typedef IMG_UINT32 (*GetRegionId)(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags);
+
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+ /*! Translate CPU physical address to device physical address */
+ CpuPAddrToDevPAddr pfnCpuPAddrToDevPAddr;
+ /*! Translate device physical address to CPU physical address */
+ DevPAddrToCpuPAddr pfnDevPAddrToCpuPAddr;
+ /*! Return id of heap region to allocate from */
+ GetRegionId pfnGetRegionId;
+} PHYS_HEAP_FUNCTIONS;
+
+typedef enum _PHYS_HEAP_TYPE_
+{
+ PHYS_HEAP_TYPE_UNKNOWN = 0,
+ PHYS_HEAP_TYPE_UMA,
+ PHYS_HEAP_TYPE_LMA,
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ PHYS_HEAP_TYPE_DMA,
+#endif
+} PHYS_HEAP_TYPE;
+
+typedef struct _PHYS_HEAP_REGION_
+{
+ IMG_CPU_PHYADDR sStartAddr;
+ IMG_DEV_PHYADDR sCardBase;
+ IMG_UINT64 uiSize;
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ IMG_HANDLE hPrivData;
+ IMG_BOOL bDynAlloc;
+#endif
+} PHYS_HEAP_REGION;
+
+typedef struct _PHYS_HEAP_CONFIG_
+{
+ IMG_UINT32 ui32PhysHeapID;
+ PHYS_HEAP_TYPE eType;
+ IMG_CHAR *pszPDumpMemspaceName;
+ PHYS_HEAP_FUNCTIONS *psMemFuncs;
+
+ PHYS_HEAP_REGION *pasRegions;
+ IMG_UINT32 ui32NumOfRegions;
+
+ IMG_HANDLE hPrivData;
+} PHYS_HEAP_CONFIG;
+
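+/*
+ * Illustrative sketch (not part of the driver): a system layer registering a
+ * UMA heap whose CPU and device physical addresses are identical. The heap
+ * ID, memspace name and helper names are placeholders, and the uiAddr field
+ * of the physical address structures is assumed.
+ *
+ *	static void _ExampleCpuToDev(IMG_HANDLE hPriv, IMG_UINT32 ui32NumOfAddr,
+ *	                             IMG_DEV_PHYADDR *psDevPAddr,
+ *	                             IMG_CPU_PHYADDR *psCpuPAddr)
+ *	{
+ *		IMG_UINT32 i;
+ *		PVR_UNREFERENCED_PARAMETER(hPriv);
+ *		for (i = 0; i < ui32NumOfAddr; i++)
+ *			psDevPAddr[i].uiAddr = psCpuPAddr[i].uiAddr;
+ *	}
+ *
+ * With _ExampleDevToCpu doing the reverse translation, the heap is then
+ * registered as:
+ *
+ *	static PHYS_HEAP_FUNCTIONS gsExampleFuncs = {
+ *		.pfnCpuPAddrToDevPAddr = _ExampleCpuToDev,
+ *		.pfnDevPAddrToCpuPAddr = _ExampleDevToCpu,
+ *		.pfnGetRegionId        = NULL,
+ *	};
+ *
+ *	PHYS_HEAP_CONFIG sConfig = { 0 };
+ *	PHYS_HEAP *psHeap;
+ *	PVRSRV_ERROR eError;
+ *
+ *	sConfig.ui32PhysHeapID       = 0;
+ *	sConfig.eType                = PHYS_HEAP_TYPE_UMA;
+ *	sConfig.pszPDumpMemspaceName = "SYSMEM";
+ *	sConfig.psMemFuncs           = &gsExampleFuncs;
+ *	eError = PhysHeapRegister(&sConfig, &psHeap);
+ *
+ * A NULL pfnGetRegionId is valid; PhysHeapGetRegionId() then returns 0.
+ */
+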
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+ PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_UINT64 *puiSize);
+
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap);
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapInit(void);
+PVRSRV_ERROR PhysHeapDeinit(void);
+
+#endif /* _PHYSHEAP_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File physmem.c
+@Title Physmem
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common entry point for creation of RAM backed PMR's
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "rgx_heaps.h"
+
+#if defined(DEBUG)
+IMG_UINT32 gPMRAllocFail = 0;
+#endif /* defined(DEBUG) */
+
+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32MemSize,
+ const IMG_UINT8 u8Value,
+ IMG_BOOL bInitPage,
+#if defined(PDUMP)
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_HANDLE *phHandlePtr,
+#endif
+ IMG_HANDLE hMemHandle,
+ IMG_DEV_PHYADDR *psDevPhysAddr)
+{
+ void *pvCpuVAddr;
+ PVRSRV_ERROR eError;
+#if defined(PDUMP)
+ IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME];
+ PDUMP_FILEOFFSET_T uiOffsetOut;
+#endif
+ PG_HANDLE *psMemHandle;
+ IMG_UINT32 ui32PageSize;
+
+ psMemHandle = hMemHandle;
+ ui32PageSize = OSGetPageSize();
+
+ /*Allocate the page */
+ eError = psDevNode->pfnDevPxAlloc(psDevNode,
+ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+ psMemHandle,
+ psDevPhysAddr);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to allocate the pages"));
+ return eError;
+ }
+
+#if defined(PDUMP)
+ eError = PDumpMalloc(pszDevSpace,
+ pszSymbolicAddress,
+ ui32MemSize,
+ ui32PageSize,
+ IMG_FALSE,
+ 0,
+ IMG_FALSE,
+ phHandlePtr);
+ if(PVRSRV_OK != eError)
+ {
+ PDUMPCOMMENT("Allocating pages failed");
+ *phHandlePtr = NULL;
+ }
+#endif
+
+ if(bInitPage)
+ {
+ /*Map the page to the CPU VA space */
+ eError = psDevNode->pfnDevPxMap(psDevNode,
+ psMemHandle,
+ ui32MemSize,
+ psDevPhysAddr,
+ &pvCpuVAddr);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to map the allocated page"));
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+ return eError;
+ }
+
+		/* Fill the memory with the given value */
+		/* NOTE: Wrong for the LMA + ARM64 combination, but this is unlikely */
+		OSCachedMemSet(pvCpuVAddr, u8Value, ui32MemSize);
+
+		/* Clean (flush) the written data from the CPU cache */
+ eError = psDevNode->pfnDevPxClean(psDevNode,
+ psMemHandle,
+ 0,
+ ui32MemSize);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to clean the allocated page"));
+ psDevNode->pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr);
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+ return eError;
+ }
+
+#if defined(PDUMP)
+		/* The page contents can be PDumped in one of two ways:
+		 *
+		 * 1. Store the single-byte init value in the .prm file and load
+		 *    that value across the whole page buffer. This needs a large
+		 *    number of LDBs inserted into the out2.txt script.
+		 *
+		 * 2. Store the entire contents of the buffer in the .prm file and
+		 *    load them back with a single LDB instruction.
+		 *
+		 * Method 2 is used here; the .prm file grows slightly, but not by
+		 * much for an allocation of this size.
+		 */
+		/* Write the buffer contents to the .prm file */
+ eError = PDumpWriteBuffer(pvCpuVAddr,
+ ui32MemSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ szFilenameOut,
+ sizeof(szFilenameOut),
+ &uiOffsetOut);
+ if(PVRSRV_OK == eError)
+ {
+ /* Load the buffer back to the allocated memory when playing the pdump */
+ eError = PDumpPMRLDB(pszDevSpace,
+ pszSymbolicAddress,
+ 0,
+ ui32MemSize,
+ szFilenameOut,
+ uiOffsetOut,
+ PDUMP_FLAGS_CONTINUOUS);
+ if(PVRSRV_OK != eError)
+ {
+ PDUMP_ERROR(eError, "Failed to write LDB statement to script file");
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write LDB statement to script file, error %d", eError));
+ }
+
+ }
+ else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ PDUMP_ERROR(eError, "Failed to write device allocation to parameter file");
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eError));
+ }
+ else
+ {
+			/* The write to the parameter file was prevented by the flags and
+			 * the current state of the driver, so skip the script write and
+			 * do not treat this as an error.
+			 */
+ eError = PVRSRV_OK;
+ }
+#endif
+
+ /*UnMap the page */
+ psDevNode->pfnDevPxUnMap(psDevNode,
+ psMemHandle,
+ pvCpuVAddr);
+ }
+
+ return PVRSRV_OK;
+
+}
+
+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+ IMG_HANDLE hPDUMPMemHandle,
+#endif
+ IMG_HANDLE hMemHandle)
+{
+ PG_HANDLE *psMemHandle;
+
+ psMemHandle = hMemHandle;
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+#if defined(PDUMP)
+ if(NULL != hPDUMPMemHandle)
+ {
+ PDumpFree(hPDUMPMemHandle);
+ }
+#endif
+
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+ PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
+ psDevNode->psDevConfig->pfnCheckMemAllocSize;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
+
+ /* We don't currently support sparse memory with non OS page sized heaps */
+ if (ui32NumVirtChunks > 1 && (uiLog2PageSize != OSGetPageShift()))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+				"Requested sparse allocation page size (2^%u) does not match the OS page size.",
+ uiLog2PageSize));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Protect against ridiculous page sizes */
+ if (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2PageSize));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Lookup the requested physheap index to use for this PMR allocation */
+ if (PVRSRV_CHECK_FW_LOCAL(uiFlags))
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ }
+ else if (PVRSRV_CHECK_CPU_LOCAL(uiFlags))
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL;
+ }
+ else
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+ }
+
+ /* Fail if requesting coherency on one side but uncached on the other */
+ if ( (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
+ (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached "
+ "Please use GPU cached flags for coherency."));
+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ }
+
+ if ( (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+ (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached "
+ "Please use CPU cached flags for coherency."));
+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ }
+
+ /* Apply memory budgeting policy */
+ if (pfnCheckMemAllocSize)
+ {
+ IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
+ PVRSRV_ERROR eError;
+
+ eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if defined(DEBUG)
+ if (gPMRAllocFail > 0)
+ {
+ static IMG_UINT32 ui32AllocCount = 1;
+
+ if (ui32AllocCount < gPMRAllocFail)
+ {
+ ui32AllocCount++;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.",
+ __func__, ui32AllocCount));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+#endif /* defined(DEBUG) */
+
+ return psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psDevNode,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2PageSize,
+ uiFlags,
+ pszAnnotation,
+ ppsPMRPtr);
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr)
+{
+
+ PVRSRV_ERROR eError;
+ eError = PhysmemNewRamBackedPMR(psConnection,
+ psDevNode,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2PageSize,
+ uiFlags,
+ uiAnnotationLength,
+ pszAnnotation,
+ ppsPMRPtr);
+
+ if (eError == PVRSRV_OK)
+ {
+ eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+ }
+
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Physmem header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the common entry points for creation of RAM-backed PMRs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_H_
+#define _SRVSRV_PHYSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function DevPhysMemAlloc
+
+@Description Allocate memory from device-specific heaps directly.
+
+@Input psDevNode Device node to operate on
+@Input ui32MemSize Size of the memory to be allocated
+@Input u8Value Value with which to initialise the allocated memory
+@Input bInitPage Flag to control initialisation
+@Input pszDevSpace PDUMP memory space in which the
+ allocation is to be done
+@Input pszSymbolicAddress Symbolic name of the allocation
+@Output phHandlePtr PDUMP handle to the allocation
+@Output psMemHandle Handle to the allocated memory
+@Output psDevPhysAddr Device physical address of the allocated
+ page
+
+@Return PVRSRV_OK if the allocation is successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32MemSize,
+ const IMG_UINT8 u8Value,
+ IMG_BOOL bInitPage,
+#if defined(PDUMP)
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_HANDLE *phHandlePtr,
+#endif
+ IMG_HANDLE hMemHandle,
+ IMG_DEV_PHYADDR *psDevPhysAddr);
+
+/*************************************************************************/ /*!
+@Function DevPhysMemFree
+
+@Description Free memory allocated from device-specific heaps directly.
+
+@Input psDevNode Device node to operate on
+@Input hPDUMPMemHandle PDUMP handle to the allocated memory
+@Input hMemHandle Devmem handle to the allocated memory
+
+@Return None
+*/
+/*****************************************************************************/
+extern void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+ IMG_HANDLE hPDUMPMemHandle,
+#endif
+ IMG_HANDLE hMemHandle);
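+
+/* Minimal usage sketch for the two helpers above (illustrative only, not
+ * part of the driver). It assumes a non-PDUMP build, an initialised
+ * psDevNode, and that the caller owns the PG_HANDLE storage passed in as
+ * hMemHandle:
+ *
+ *	PG_HANDLE sMemHandle;
+ *	IMG_DEV_PHYADDR sDevPAddr;
+ *	PVRSRV_ERROR eErr;
+ *
+ *	eErr = DevPhysMemAlloc(psDevNode,
+ *	                       OSGetPageSize(),   <- one OS page
+ *	                       0x00,              <- init value
+ *	                       IMG_TRUE,          <- initialise the page
+ *	                       &sMemHandle,
+ *	                       &sDevPAddr);
+ *	if (eErr == PVRSRV_OK)
+ *	{
+ *		... use sDevPAddr ...
+ *		DevPhysMemFree(psDevNode, &sMemHandle);
+ *	}
+ */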
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback. This allows the memory source to be selected at a
+ * per-device-node level, thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of the page size, which is specified in
+ * log2. The page size should be regarded as a minimum contiguity of
+ * which the resulting memory must be a multiple. It may also be that
+ * the allocation size needs to be a multiple of some coarser "page
+ * size" than that specified in the page size argument. For example,
+ * take an OS whose page granularity is a fixed 16kB, but the caller
+ * requests memory with a page size of 4kB. The request can be
+ * satisfied if and only if the size requested is a multiple of 16kB.
+ * If the arguments supplied are such that this OS cannot grant the
+ * request, PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage for a pointer. Upon successful
+ * return a PMR object will have been created and a pointer to it
+ * returned in the PMROut argument.
+ *
+ * A PMR created in this way should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some
+ * OSes this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour
+ * if required. The flags will also be stored in the PMR as immutable
+ * metadata and returned to mmu_common when it asks for it.
+ *
+ */
+extern PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMROut);
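+
+/* Worked example of the contiguity rule described above (illustrative only,
+ * not part of the driver; the numbers are assumed): on an OS with a fixed
+ * 16kB page granularity, a request with uiLog2PageSize == 12 (4kB) can only
+ * be granted when uiSize is also a 16kB multiple, e.g.
+ *
+ *	uiSize = 80 * 1024;	80kB, a 16kB multiple  -> may be granted
+ *	uiSize = 12 * 1024;	12kB, 4kB-aligned only -> PVRSRV_ERROR_INVALID_PARAMS
+ *
+ * In other words, uiLog2PageSize expresses a minimum contiguity requirement,
+ * not a guarantee about the granularity the allocator will actually use.
+ */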
+
+
+/*
+ * PhysmemNewRamBackedLockedPMR
+ *
+ * Same as the function above, but additionally locks down the PMR.
+ *
+ * Get the physical memory and lock down the PMR directly; we do not want to
+ * defer the actual allocation to mapping time.
+ *
+ * In general the concept of on-demand allocation is not useful for allocations
+ * where users are free to map and unmap memory at will. Users do not expect
+ * their memory contents to suddenly vanish just because they unmapped the
+ * buffer. Even if they knew and accepted that, we do not want to check, for
+ * every page we unmap, whether the underlying PMR has to be unlocked.
+*/
+extern PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr);
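+
+/* Usage sketch (illustrative only): the locked variant is simply the
+ * creation call above followed by PMRLockSysPhysAddresses(), so teardown
+ * is assumed to be the matching unlock followed by dropping the reference:
+ *
+ *	eErr = PhysmemNewRamBackedLockedPMR(..., &psPMR);
+ *	...
+ *	PMRUnlockSysPhysAddresses(psPMR);
+ *	PMRUnrefPMR(psPMR);
+ *
+ * PMRUnlockSysPhysAddresses/PMRUnrefPMR are assumed to be the counterparts
+ * declared in pmr.h; check that interface for the exact teardown required.
+ */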
+
+#endif /* _SRVSRV_PHYSMEM_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File physmem_dmabuf.c
+@Title dmabuf memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for dmabuf memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#include "hash.h"
+#include "private_data.h"
+#include "module_common.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These all return errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */
+
+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, struct device *psDev,
+ struct dma_buf_attachment *psAttachment)
+{
+ return -ENOSYS;
+}
+
+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
+ enum dma_data_direction eDirection)
+{
+ /* Attach hasn't been called yet */
+ return ERR_PTR(-EINVAL);
+}
+
+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
+ struct sg_table *psTable,
+ enum dma_data_direction eDirection)
+{
+}
+
+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
+{
+ PMR *psPMR = (PMR *) psDmaBuf->priv;
+
+ PMRUnrefPMR(psPMR);
+}
+
+static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
+{
+ return -ENOSYS;
+}
+
+static const struct dma_buf_ops sPVRDmaBufOps =
+{
+ .attach = PVRDmaBufOpsAttach,
+ .map_dma_buf = PVRDmaBufOpsMap,
+ .unmap_dma_buf = PVRDmaBufOpsUnmap,
+ .release = PVRDmaBufOpsRelease,
+ .kmap_atomic = PVRDmaBufOpsKMap,
+ .kmap = PVRDmaBufOpsKMap,
+ .mmap = PVRDmaBufOpsMMap,
+};
+
+/* end of dma_buf_ops */
+
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+ /* Filled in at PMR create time */
+ PHYS_HEAP *psPhysHeap;
+ struct dma_buf_attachment *psAttachment;
+ PFN_DESTROY_DMABUF_PMR pfnDestroy;
+ IMG_BOOL bPoisonOnFree;
+
+ /* Modified by PMR lock/unlock */
+ struct sg_table *psSgTable;
+ IMG_DEV_PHYADDR *pasDevPhysAddr;
+ IMG_UINT32 ui32PhysPageCount;
+ IMG_UINT32 ui32VirtPageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+static HASH_TABLE *g_psDmaBufHash = NULL;
+static IMG_UINT32 g_ui32HashRefCount = 0;
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static void _Poison(void *pvKernAddr,
+ IMG_DEVMEM_SIZE_T uiBufferSize,
+ const IMG_CHAR *pacPoisonData,
+ size_t uiPoisonSize)
+{
+ IMG_DEVMEM_SIZE_T uiDestByteIndex;
+ IMG_CHAR *pcDest = pvKernAddr;
+ IMG_UINT32 uiSrcByteIndex = 0;
+
+ for (uiDestByteIndex = 0; uiDestByteIndex < uiBufferSize; uiDestByteIndex++)
+ {
+ pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+ uiSrcByteIndex++;
+ if (uiSrcByteIndex == uiPoisonSize)
+ {
+ uiSrcByteIndex = 0;
+ }
+ }
+}
+
+
+/*****************************************************************************
+ * PMR callback functions *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+ struct sg_table *psSgTable = psPrivData->psSgTable;
+ PVRSRV_ERROR eError;
+
+ psPrivData->ui32PhysPageCount = 0;
+
+ dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+
+ if (psPrivData->bPoisonOnFree)
+ {
+ void *pvKernAddr;
+ int i, err;
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to begin cpu access for free poisoning (err=%d)",
+ __func__, err));
+ PVR_ASSERT(IMG_FALSE);
+ goto exit;
+ }
+
+ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+ {
+ pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to poison allocation before free (err=%ld)",
+ __func__, pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+ PVR_ASSERT(IMG_FALSE);
+ goto exit_end_access;
+ }
+
+ _Poison(pvKernAddr, PAGE_SIZE, _FreePoison, _FreePoisonSize);
+
+ dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+ }
+
+exit_end_access:
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+ }
+
+exit:
+ if (psPrivData->pfnDestroy)
+ {
+ eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ OSFreeMem(psPrivData->pasDevPhysAddr);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ IMG_UINT32 ui32PageIndex;
+ IMG_UINT32 idx;
+
+ if (ui32Log2PageSize != PAGE_SHIFT)
+ {
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ IMG_UINT32 ui32InPageOffset;
+
+ ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+ ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+
+ PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount);
+ PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+ psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ void *pvKernAddr;
+ PVRSRV_ERROR eError;
+ int err;
+
+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs "
+ "are not allowed!", __func__));
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail;
+ }
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ if (err)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail;
+ }
+
+ pvKernAddr = dma_buf_vmap(psDmaBuf);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail_kmap;
+ }
+
+ *ppvKernelAddressOut = pvKernAddr + uiOffset;
+ *phHandleOut = pvKernAddr;
+
+ return PVRSRV_OK;
+
+fail_kmap:
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ } while (err == -EAGAIN || err == -EINTR);
+
+fail:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ void *pvKernAddr = hHandle;
+ int err;
+
+ dma_buf_vunmap(psDmaBuf, pvKernAddr);
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ } while (err == -EAGAIN || err == -EINTR);
+}
+
+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ PMR *psPMR,
+ PMR_MMAP_DATA pOSMMapData)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ struct vm_area_struct *psVma = pOSMMapData;
+ int err;
+
+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not possible to MMAP sparse DMABufs",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ err = dma_buf_mmap(psDmaBuf, psVma, 0);
+ if (err)
+ {
+ return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+ .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf,
+ .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf,
+ .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf,
+ .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf,
+ .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf,
+ .pfnMMap = PMRMMapDmaBuf,
+ .pfnFinalize = PMRFinalizeDmaBuf,
+};
+
+/*****************************************************************************
+ * Public facing interface *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr)
+{
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+ PMR_DMA_BUF_DATA *psPrivData;
+ PMR_FLAGS_T uiPMRFlags;
+ IMG_BOOL bZeroOnAlloc;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i, j;
+ IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT;
+ IMG_UINT32 ui32PageCount = 0;
+ struct scatterlist *sg;
+ struct sg_table *table;
+ IMG_UINT32 uiSglOffset;
+
+ bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags);
+
+ if (bZeroOnAlloc && bPoisonOnFree)
+ {
+ /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errReturn;
+ }
+
+ psPrivData = OSAllocZMem(sizeof(*psPrivData));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errReturn;
+ }
+
+ psPrivData->psPhysHeap = psHeap;
+ psPrivData->psAttachment = psAttachment;
+ psPrivData->pfnDestroy = pfnDestroy;
+ psPrivData->bPoisonOnFree = bPoisonOnFree;
+ psPrivData->ui32VirtPageCount =
+ (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
+
+ psPrivData->pasDevPhysAddr =
+ OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) *
+ psPrivData->ui32VirtPageCount);
+ if (!psPrivData->pasDevPhysAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate buffer for physical addresses (oom)",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errFreePrivData;
+ }
+
+ if (bZeroOnAlloc || bPoisonOnAlloc)
+ {
+ void *pvKernAddr;
+ int i, err;
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+ if (err)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto errFreePhysAddr;
+ }
+
+ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+ {
+ pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map page for %s (err=%ld)",
+ __func__, bZeroOnAlloc ? "zeroing" : "poisoning",
+ pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+
+ goto errFreePhysAddr;
+ }
+
+ if (bZeroOnAlloc)
+ {
+ memset(pvKernAddr, 0, PAGE_SIZE);
+ }
+ else
+ {
+ _Poison(pvKernAddr, PAGE_SIZE, _AllocPoison, _AllocPoisonSize);
+ }
+
+ dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+ }
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+ }
+
+ table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(table))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errFreePhysAddr;
+ }
+
+ /*
+ * We do a two-pass process: first work out how many pages there
+ * are and, second, fill in the data.
+ */
+ for_each_sg(table->sgl, sg, table->nents, i)
+ {
+ ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+ }
+
+ if (WARN_ON(!ui32PageCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+
+ if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual "
+ "number of physical dma buf pages don't match",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+
+ psPrivData->ui32PhysPageCount = ui32PageCount;
+ psPrivData->psSgTable = table;
+ ui32PageCount = 0;
+ sg = table->sgl;
+ uiSglOffset = 0;
+
+
+ /* Fill physical address array */
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+ for (j = 0; j < uiPagesPerChunk; j++)
+ {
+ IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j;
+
+ psPrivData->pasDevPhysAddr[uiIdx].uiAddr =
+ sg_dma_address(sg) + uiSglOffset;
+
+ /* Get the next offset for the current sgl or the next sgl */
+ uiSglOffset += PAGE_SIZE;
+ if (uiSglOffset >= pvr_sg_length(sg))
+ {
+ sg = sg_next(sg);
+ uiSglOffset = 0;
+
+ /* Check that we haven't looped */
+ if (WARN_ON(sg == table->sgl))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address "
+ "array ",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+ }
+ }
+ }
+
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /*
+ * Check no significant bits were lost in cast due to different
+ * bit widths for flags
+ */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ eError = PMRCreatePMR(psDevNode,
+ psHeap,
+ ui32NumVirtChunks * uiChunkSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ PAGE_SHIFT,
+ uiPMRFlags,
+ "IMPORTED_DMABUF",
+ &_sPMRDmaBufFuncTab,
+ psPrivData,
+ PMR_TYPE_DMABUF,
+ ppsPMRPtr,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto errFreePhysAddr;
+ }
+
+ return PVRSRV_OK;
+
+errUnmap:
+ dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+errFreePhysAddr:
+ OSFreeMem(psPrivData->pasDevPhysAddr);
+errFreePrivData:
+ OSFreeMem(psPrivData);
+errReturn:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment)
+{
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+ HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ g_ui32HashRefCount--;
+
+ if (g_ui32HashRefCount == 0)
+ {
+ HASH_Delete(g_psDmaBufHash);
+ g_psDmaBufHash = NULL;
+ }
+
+ PhysHeapRelease(psHeap);
+
+ dma_buf_detach(psDmaBuf, psAttachment);
+ dma_buf_put(psDmaBuf);
+
+ return PVRSRV_OK;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+ PMR_DMA_BUF_DATA *psPrivData;
+
+ psPrivData = PMRGetPrivateDataHack(psPMR, &_sPMRDmaBufFuncTab);
+ if (psPrivData)
+ {
+ return psPrivData->psAttachment->dmabuf;
+ }
+
+ return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd)
+{
+ struct dma_buf *psDmaBuf;
+ IMG_DEVMEM_SIZE_T uiPMRSize;
+ PVRSRV_ERROR eError;
+ IMG_INT iFd;
+
+ PMRRefPMR(psPMR);
+
+ eError = PMR_LogicalSize(psPMR, &uiPMRSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_pmr_ref;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ {
+ DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
+
+ sDmaBufExportInfo.priv = psPMR;
+ sDmaBufExportInfo.ops = &sPVRDmaBufOps;
+ sDmaBufExportInfo.size = uiPMRSize;
+ sDmaBufExportInfo.flags = O_RDWR;
+
+ psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
+ }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+ uiPMRSize, O_RDWR, NULL);
+#else
+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+ uiPMRSize, O_RDWR);
+#endif
+
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_pmr_ref;
+ }
+
+ iFd = dma_buf_fd(psDmaBuf, O_RDWR);
+ if (iFd < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
+ __func__, iFd));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_dma_buf;
+ }
+
+ *piFd = iFd;
+ return PVRSRV_OK;
+
+fail_dma_buf:
+ dma_buf_put(psDmaBuf);
+
+fail_pmr_ref:
+ PMRUnrefPMR(psPMR);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32MappingTable = 0;
+ struct dma_buf *psDmaBuf;
+
+ /* Get the buffer handle */
+ psDmaBuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ return PVRSRV_ERROR_BAD_MAPPING;
+
+ }
+
+ uiSize = psDmaBuf->size;
+
+ dma_buf_put(psDmaBuf);
+
+ return PhysmemImportSparseDmaBuf(psConnection,
+ psDevNode,
+ fd,
+ uiFlags,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ ppsPMRPtr,
+ puiSize,
+ puiAlign);
+
+
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PMR *psPMR = NULL;
+ struct dma_buf_attachment *psAttachment;
+ struct dma_buf *psDmaBuf;
+ PHYS_HEAP *psHeap;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (!psDevNode)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errReturn;
+ }
+
+ /* Get the buffer handle */
+ psDmaBuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto errReturn;
+ }
+
+ if (psDmaBuf->ops == &sPVRDmaBufOps)
+ {
+ PVRSRV_DEVICE_NODE *psPMRDevNode;
+
+ /* We exported this dma_buf, so we can just get its PMR */
+ psPMR = (PMR *) psDmaBuf->priv;
+
+ /* However, we can't import it if it belongs to a different device */
+ psPMRDevNode = PMR_DeviceNode(psPMR);
+ if (psPMRDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n",
+ __func__));
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ goto errDMAPut;
+ }
+ }
+ else if (g_psDmaBufHash)
+ {
+ /* We have a hash table so check if we've seen this dmabuf before */
+ psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ }
+
+ if (psPMR)
+ {
+ /* Reuse the PMR we already created */
+ PMRRefPMR(psPMR);
+
+ *ppsPMRPtr = psPMR;
+ PMR_LogicalSize(psPMR, puiSize);
+ *puiAlign = PAGE_SIZE;
+
+ dma_buf_put(psDmaBuf);
+
+ return PVRSRV_OK;
+ }
+
+ /* Do we want this to be a sparse PMR? */
+ if (ui32NumVirtChunks > 1)
+ {
+ IMG_UINT32 i;
+
+ /* Parameter validation */
+ if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
+ uiChunkSize != PAGE_SIZE ||
+ ui32NumPhysChunks > ui32NumVirtChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requesting sparse buffer: "
+ "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
+ "OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
+ "("IMG_DEVMEM_SIZE_FMTSPEC") must"
+ " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
+ "ui32NumPhysChunks (%u) must be lesser or equal to "
+ "ui32NumVirtChunks (%u)",
+ __func__,
+ uiChunkSize,
+ PAGE_SIZE,
+ uiChunkSize * ui32NumPhysChunks,
+ psDmaBuf->size,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errDMAPut;
+ }
+
+ /* Parameter validation - Mapping table entries*/
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+ if (pui32MappingTable[i] >= ui32NumVirtChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requesting sparse buffer: "
+ "Entry in mapping table (%u) is out of allocation "
+ "bounds (%u)",
+ __func__,
+ (IMG_UINT32) pui32MappingTable[i],
+ (IMG_UINT32) ui32NumVirtChunks));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errDMAPut;
+ }
+ }
+ }
+ else
+ {
+ /* Make sure parameters are valid for non-sparse allocations as well */
+ uiChunkSize = psDmaBuf->size;
+ ui32NumPhysChunks = 1;
+ ui32NumVirtChunks = 1;
+ pui32MappingTable[0] = 0;
+ }
+
+
+ psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
+ if (IS_ERR_OR_NULL(psAttachment))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
+ __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto errDMAPut;
+ }
+
+ /*
+ * Get the physical heap for this PMR
+ *
+ * Note:
+ * Since we have no way to determine the type of the buffer,
+ * we simply assume that all dmabufs come from the same
+ * physical heap.
+ */
+ eError = PhysHeapAcquire(DMABUF_IMPORT_PHYSHEAP_ID, &psHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire physical heap (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto errDMADetach;
+ }
+
+ eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode,
+ psHeap,
+ psAttachment,
+ PhysmemDestroyDmaBuf,
+ uiFlags,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto errHeapRelease;
+ }
+
+ if (!g_psDmaBufHash)
+ {
+ /*
+ * As different processes may import the same dmabuf we need to
+ * create a hash table so we don't generate a duplicate PMR but
+ * rather just take a reference on an existing one.
+ */
+ g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+ if (!g_psDmaBufHash)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errUnrefPMR;
+ }
+ }
+
+ /* First time we've seen this dmabuf so store it in the hash table */
+ HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+ g_ui32HashRefCount++;
+
+ *ppsPMRPtr = psPMR;
+ *puiSize = ui32NumVirtChunks * uiChunkSize;
+ *puiAlign = PAGE_SIZE;
+
+ return PVRSRV_OK;
+
+errUnrefPMR:
+ PMRUnrefPMR(psPMR);
+
+errHeapRelease:
+ PhysHeapRelease(psHeap);
+
+errDMADetach:
+ dma_buf_detach(psDmaBuf, psAttachment);
+
+errDMAPut:
+ dma_buf_put(psDmaBuf);
+
+errReturn:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psHeap);
+ PVR_UNREFERENCED_PARAMETER(psAttachment);
+ PVR_UNREFERENCED_PARAMETER(pfnDestroy);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+
+ return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(piFd);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(fd);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiAlign);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(fd);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiAlign);
+ PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
--- /dev/null
+/**************************************************************************/ /*!
+@File physmem_dmabuf.h
+@Title Header for dmabuf PMR factory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for importing Ion/dma-buf allocations
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_PHYSMEM_DMABUF_H_)
+#define _PHYSMEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr);
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR);
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd);
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
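+
+/* Import usage sketch (illustrative only, not part of the driver): a
+ * bridge-level caller holding a dma-buf file descriptor iFd is expected to
+ * obtain a PMR roughly as follows (psConnection/psDevNode assumed valid,
+ * flags and error handling trimmed):
+ *
+ *	PMR *psPMR;
+ *	IMG_DEVMEM_SIZE_T uiSize;
+ *	IMG_DEVMEM_ALIGN_T uiAlign;
+ *
+ *	eErr = PhysmemImportDmaBuf(psConnection, psDevNode, iFd, uiFlags,
+ *	                           &psPMR, &uiSize, &uiAlign);
+ *
+ * Importing the same dma-buf again, from any process, returns the cached
+ * PMR with an extra reference (see the hash table in physmem_dmabuf.c);
+ * the reference is dropped with PMRUnrefPMR().
+ */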
+
+#endif /* !defined(_PHYSMEM_DMABUF_H_) */
--- /dev/null
+/*************************************************************************/ /*!
+@File physmem_lma.c
+@Title Local card memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for local card memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "devicemem_server_utils.h"
+#include "physmem_lma.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_INT32 iNumPagesAllocated;
+ /*
+ * uiTotalNumPages:
+ * Total number of pages supported by this PMR. (Fixed for now due to the fixed page table array size)
+ */
+ IMG_UINT32 uiTotalNumPages;
+ IMG_UINT32 uiPagesToAlloc;
+
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiAllocSize;
+ IMG_DEV_PHYADDR *pasDevPAddr;
+
+ IMG_BOOL bZeroOnAlloc;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bFwLocalAlloc;
+
+ /* Tells if allocation is physically backed */
+ IMG_BOOL bHasLMPages;
+ IMG_BOOL bOnDemand;
+
+ /*
+ record at alloc time whether poisoning will be required when the
+ PMR is freed.
+ */
+ IMG_BOOL bPoisonOnFree;
+
+ /* Physical heap and arena pointers for this allocation */
+ PHYS_HEAP* psPhysHeap;
+ RA_ARENA* psArena;
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
+
+} PMR_LMALLOCARRAY_DATA;
+
+static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ size_t uiSize,
+ IMG_BOOL bFwLocalAlloc,
+ PMR_FLAGS_T ulFlags,
+ void **pvPtr)
+{
+ IMG_UINT32 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, ulFlags);
+ IMG_CPU_PHYADDR sCpuPAddr;
+ PHYS_HEAP *psPhysHeap;
+
+ if (bFwLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+
+ *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+ if (*pvPtr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ return PVRSRV_OK;
+ }
+}
+
+static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ size_t uiSize,
+ IMG_BOOL bFwLocalAlloc,
+ PMR_FLAGS_T ulFlags,
+ void *pvPtr)
+{
+ OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bFwLocalAlloc,
+ IMG_UINT32 uiAllocSize,
+ const IMG_CHAR *pacPoisonData,
+ size_t uiPoisonSize)
+{
+ IMG_UINT32 uiSrcByteIndex;
+ IMG_UINT32 uiDestByteIndex;
+ void *pvKernLin = NULL;
+ IMG_CHAR *pcDest = NULL;
+
+ PVRSRV_ERROR eError;
+
+ eError = _MapAlloc(psDevNode,
+ psDevPAddr,
+ uiAllocSize,
+ bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvKernLin);
+ if (eError != PVRSRV_OK)
+ {
+ goto map_failed;
+ }
+ pcDest = pvKernLin;
+
+ uiSrcByteIndex = 0;
+ for(uiDestByteIndex=0; uiDestByteIndex<uiAllocSize; uiDestByteIndex++)
+ {
+ pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+ uiSrcByteIndex++;
+ if (uiSrcByteIndex == uiPoisonSize)
+ {
+ uiSrcByteIndex = 0;
+ }
+ }
+
+ _UnMapAlloc(psDevNode, uiAllocSize, bFwLocalAlloc, 0,pvKernLin);
+
+ return PVRSRV_OK;
+
+map_failed:
+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+ return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bFwLocalAlloc,
+ IMG_UINT32 uiAllocSize)
+{
+ void *pvKernLin = NULL;
+ PVRSRV_ERROR eError;
+
+ eError = _MapAlloc(psDevNode,
+ psDevPAddr,
+ uiAllocSize,
+ bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvKernLin);
+ if (eError != PVRSRV_OK)
+ {
+ goto map_failed;
+ }
+
+ /* NOTE: 'CachedMemSet' means the operating system's default memset, which
+ * we *assume* will be faster here in the LMA code and which does not
+ * need to worry about the ARM64 case.
+ */
+ OSCachedMemSet(pvKernLin, 0, uiAllocSize);
+
+ _UnMapAlloc(psDevNode, uiAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+ return PVRSRV_OK;
+
+map_failed:
+ PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+ return eError;
+}
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pabMappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bZero,
+ IMG_BOOL bPoisonOnAlloc,
+ IMG_BOOL bPoisonOnFree,
+ IMG_BOOL bContig,
+ IMG_BOOL bOnDemand,
+ IMG_BOOL bFwLocalAlloc,
+ PHYS_HEAP* psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+ PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
+ )
+{
+ PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+
+ if (uiSize >= 0x1000000000ULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "physmem_lma.c: Do you really want 64GB of physical memory in one go? This is likely a bug"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errorOnParam;
+ }
+
+ PVR_ASSERT(OSGetPageShift() <= uiLog2PageSize);
+
+ if ((uiSize & ((1ULL << uiLog2PageSize) - 1)) != 0)
+ {
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto errorOnParam;
+ }
+
+ psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+ if (psPageArrayData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocArray;
+ }
+
+ if (bContig)
+ {
+ /*
+ Some allocations require kernel mappings, in which case, in order
+ to be virtually contiguous, we also have to be physically contiguous.
+ */
+ psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages = 1;
+ psPageArrayData->uiAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+ psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
+ }
+ else
+ {
+ IMG_UINT32 uiNumPages;
+
+ /* Use of cast below is justified by the assertion that follows to
+ prove that no significant bits have been truncated */
+ uiNumPages = (IMG_UINT32)(((uiSize-1)>>uiLog2PageSize) + 1);
+ PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2PageSize) == uiSize);
+ psPageArrayData->uiTotalNumPages = uiNumPages;
+ if ((1 == ui32NumPhysChunks) && (1 == ui32NumVirtChunks))
+ {
+ psPageArrayData->uiPagesToAlloc = uiNumPages;
+ }
+ else
+ {
+ psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
+ }
+ psPageArrayData->uiAllocSize = 1 << uiLog2PageSize;
+ psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
+ }
+ psPageArrayData->psDevNode = psDevNode;
+ psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR)*
+ psPageArrayData->uiTotalNumPages);
+ if (psPageArrayData->pasDevPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocAddr;
+ }
+
+ OSCachedMemSet(&psPageArrayData->pasDevPAddr[0], INVALID_PAGE, sizeof(IMG_DEV_PHYADDR)*
+ psPageArrayData->uiTotalNumPages);
+
+ psPageArrayData->iNumPagesAllocated = 0;
+ psPageArrayData->bZeroOnAlloc = bZero;
+ psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+ psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ psPageArrayData->bHasLMPages = IMG_FALSE;
+ psPageArrayData->bOnDemand = bOnDemand;
+ psPageArrayData->bFwLocalAlloc = bFwLocalAlloc;
+ psPageArrayData->psPhysHeap = psPhysHeap;
+ psPageArrayData->uiAllocFlags = uiAllocFlags;
+
+ *ppsPageArrayDataPtr = psPageArrayData;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+
+errorOnAllocAddr:
+ OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+errorOnParam:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+{
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiCardAddr;
+ RA_LENGTH_T uiActualSize;
+ IMG_UINT32 i,ui32Index=0;
+ IMG_UINT32 uiAllocSize;
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiRegionId;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bZeroOnAlloc;
+ RA_ARENA *pArena;
+
+ PVR_ASSERT(NULL != psPageArrayData);
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+ uiAllocSize = psPageArrayData->uiAllocSize;
+ uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
+ psDevNode = psPageArrayData->psDevNode;
+ bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+ bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if (psPageArrayData->bFwLocalAlloc)
+ {
+ PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+ pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
+ psDevNode->uiKernelFwRAIdx = 0;
+ }
+ else
+#endif
+ {
+ /* Get suitable local memory region for this allocation */
+ uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap, psPageArrayData->uiAllocFlags);
+
+ PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas);
+ pArena = psDevNode->apsLocalDevMemArenas[uiRegionId];
+ }
+
+ if(psPageArrayData->uiTotalNumPages < (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Pages requested to allocate larger than original PMR alloc Size"));
+ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ return eError;
+ }
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ {
+ IMG_UINT32 ui32OSid=0, ui32OSidReg=0;
+ IMG_BOOL bOSidAxiProt;
+ IMG_PID pId;
+
+ pId=OSGetCurrentClientProcessIDKM();
+ RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ pArena=psDevNode->psOSidSubArena[ui32OSid];
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
+ }
+#endif
+
+ psPageArrayData->psArena = pArena;
+
+ for(i=0;i<psPageArrayData->uiPagesToAlloc;i++)
+ {
+
+ /*Work out the target index before allocating the page; this keeps the error paths simple */
+ if(psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ if(NULL == pui32MapTable)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Mapping table cannot be null"));
+ eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+ goto errorOnRAAlloc;
+ }
+
+ ui32Index = pui32MapTable[i];
+ if(ui32Index >= psPageArrayData->uiTotalNumPages)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Page alloc request Index out of bounds for PMR @0x%p",__func__, psPageArrayData));
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto errorOnRAAlloc;
+ }
+
+ if(INVALID_PAGE != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Mapping already exists"));
+ eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ goto errorOnRAAlloc;
+ }
+ }
+
+ eError = RA_Alloc(pArena,
+ uiAllocSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ 1ULL << uiLog2AllocSize,
+ "LMA_Page_Alloc",
+ &uiCardAddr,
+ &uiActualSize,
+ NULL); /* No private handle */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Address: %llu \n",uiCardAddr));
+}
+#endif
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to Allocate the page @index:%d",ui32Index));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto errorOnRAAlloc;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Allocation is done a page at a time */
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize);
+#else
+ {
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+ sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ NULL,
+ sLocalCpuPAddr,
+ uiActualSize,
+ NULL);
+ }
+#endif
+#endif
+
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
+ if (bPoisonOnAlloc)
+ {
+ eError = _PoisonAlloc(psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiAllocSize,
+ _AllocPoison,
+ _AllocPoisonSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to poison the page"));
+ goto errorOnPoison;
+ }
+ }
+
+ if (bZeroOnAlloc)
+ {
+ eError = _ZeroAlloc(psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiAllocSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to zero the page"));
+ goto errorOnZero;
+ }
+ }
+ }
+ psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+ if(psPageArrayData->iNumPagesAllocated)
+ {
+ psPageArrayData->bHasLMPages = IMG_TRUE;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+errorOnZero:
+errorOnPoison:
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+errorOnRAAlloc:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate page at index %d (iteration %d of %d requested pages): %s",
+ __func__,
+ ui32Index,
+ i,
+ psPageArrayData->uiPagesToAlloc,
+ PVRSRVGetErrorStringKM(eError)));
+ /* Unwind the pages allocated so far; the loop terminates via unsigned wrap-around of i once index 0 has been processed */
+ while (--i < psPageArrayData->uiPagesToAlloc)
+ {
+ if(psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ if(NULL != pui32MapTable)
+ ui32Index = pui32MapTable[i];
+ }
+
+ if(ui32Index < psPageArrayData->uiTotalNumPages)
+ {
+ RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE;
+ }
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ OSFreeMem(psPageArrayData->pasDevPAddr);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_lma.c: freed local memory array structure for PMR @0x%p", psPageArrayData));
+
+ OSFreeMem(psPageArrayData);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,IMG_UINT32 *pui32FreeIndices, IMG_UINT32 ui32FreePageCount)
+{
+ IMG_UINT32 uiAllocSize;
+ IMG_UINT32 i,ui32PagesToFree=0,ui32PagesFreed=0,ui32Index=0;
+ RA_ARENA *pArena = psPageArrayData->psArena;
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ PVRSRV_DEVICE_NODE *psDevNode = psPageArrayData->psDevNode;
+ if (psPageArrayData->bFwLocalAlloc)
+ {
+ PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+ pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
+ psDevNode->uiKernelFwRAIdx = 0;
+ }
+#endif
+
+ PVR_ASSERT(psPageArrayData->bHasLMPages);
+
+ uiAllocSize = psPageArrayData->uiAllocSize;
+
+ ui32PagesToFree = (NULL == pui32FreeIndices)?psPageArrayData->uiTotalNumPages:ui32FreePageCount;
+
+ for (i = 0;i < ui32PagesToFree;i++)
+ {
+ if(NULL == pui32FreeIndices)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ ui32Index = pui32FreeIndices[i];
+ }
+
+ if (INVALID_PAGE != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+ {
+ ui32PagesFreed++;
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ _PoisonAlloc(psPageArrayData->psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiAllocSize,
+ _FreePoison,
+ _FreePoisonSize);
+ }
+
+ RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Allocation is done a page at a time */
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiAllocSize);
+#else
+ {
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+ }
+#endif
+#endif
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE;
+ }
+ }
+ psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
+
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+ if(0 == psPageArrayData->iNumPagesAllocated)
+ {
+ psPageArrayData->bHasLMPages = IMG_FALSE;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: freed %d local memory for PMR @0x%p",__func__,(ui32PagesFreed*uiAllocSize), psPageArrayData));
+ return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+ before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv
+ )
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+
+ psLMAllocArrayData = pvPriv;
+
+ /* We can't free pages until now. */
+ if (psLMAllocArrayData->bHasLMPages)
+ {
+ eError = _FreeLMPages(psLMAllocArrayData,NULL,0);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ }
+
+ eError = _FreeLMPageArray(psLMAllocArrayData);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+
+ return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ As this is LMA we control physical memory, so normally there is nothing to
+ do; the exception is on-demand allocations, whose backing pages are
+ allocated here. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+ psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->bOnDemand)
+ {
+ /* Allocate Memory for deferred allocation */
+ eError = _AllocLMPages(psLMAllocArrayData, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv
+ )
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+ psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->bOnDemand)
+ {
+ /* Free Memory for deferred allocation */
+ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_UINT32 idx;
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiNumAllocs;
+ IMG_UINT64 uiAllocIndex;
+ IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested physical addresses from PMR "
+ "for incompatible contiguity %u!",
+ __FUNCTION__,
+ ui32Log2PageSize));
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
+ if (uiNumAllocs > 1)
+ {
+ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+ uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+ uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+ PVR_ASSERT(uiAllocIndex < uiNumAllocs);
+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+ }
+ }
+ }
+ else
+ {
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
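+
+/*
+ * Worked example for the offset decomposition above (illustrative only; the
+ * numbers are assumed): with uiLog2AllocSize = 12 (4 KB device pages) and a
+ * requested puiOffset[idx] of 0x6830,
+ *
+ * uiAllocIndex = 0x6830 >> 12 = 6
+ * uiInAllocOffset = 0x6830 - (6 << 12) = 0x830
+ *
+ * so the returned address is pasDevPAddr[6].uiAddr + 0x830. The same
+ * decomposition is reused by CopyBytesLocalMem() further down.
+ */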
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ void *pvKernLinAddr = NULL;
+ IMG_UINT32 ui32PageIndex = 0;
+ size_t uiOffsetMask = uiOffset;
+
+ psLMAllocArrayData = pvPriv;
+
+ /* Check that we can map this in contiguously */
+ if (psLMAllocArrayData->uiTotalNumPages != 1)
+ {
+ size_t uiStart = uiOffset;
+ size_t uiEnd = uiOffset + uiSize - 1;
+ size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+ /* We can still map if only one page is required */
+ if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+ {
+ eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ goto e0;
+ }
+
+ /* Locate the desired physical page to map in */
+ ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+ uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
+ }
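+
+ /*
+ * Illustrative example of the contiguity check above (values assumed):
+ * with uiLog2AllocSize = 12, uiPageMask = ~0xFFF. A request with
+ * uiOffset = 0x1800 and uiSize = 0x400 gives uiStart = 0x1800 and
+ * uiEnd = 0x1BFF; both mask to 0x1000, so the mapping stays within a
+ * single 4 KB page and is allowed. With uiSize = 0x1000 the end would
+ * mask to 0x2000 and the request is rejected as not contiguously
+ * mappable.
+ */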
+
+ PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ ulFlags,
+ &pvKernLinAddr);
+
+ *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
+ *phHandleOut = pvKernLinAddr;
+
+ return eError;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ void *pvKernLinAddr = NULL;
+
+ psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+ pvKernLinAddr = (void *) hHandle;
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes,
+ void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize))
+{
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ size_t uiBytesCopied;
+ size_t uiBytesToCopy;
+ size_t uiBytesCopyableFromAlloc;
+ void *pvMapping = NULL;
+ IMG_UINT8 *pcKernelPointer = NULL;
+ size_t uiBufferOffset;
+ IMG_UINT64 uiAllocIndex;
+ IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+ PVRSRV_ERROR eError;
+
+ psLMAllocArrayData = pvPriv;
+
+ uiBytesCopied = 0;
+ uiBytesToCopy = uiBufSz;
+ uiBufferOffset = 0;
+
+ if (psLMAllocArrayData->uiTotalNumPages > 1)
+ {
+ while (uiBytesToCopy > 0)
+ {
+ /* we have to map one alloc in at a time */
+ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+ uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+ uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+ uiBytesCopyableFromAlloc = uiBytesToCopy;
+ if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+ {
+ uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+ }
+
+ PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+ PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
+ PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ pcKernelPointer = pvMapping;
+ pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvMapping);
+
+ uiBufferOffset += uiBytesCopyableFromAlloc;
+ uiBytesToCopy -= uiBytesCopyableFromAlloc;
+ uiOffset += uiBytesCopyableFromAlloc;
+ uiBytesCopied += uiBytesCopyableFromAlloc;
+ }
+ }
+ else
+ {
+ PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiAllocSize);
+ PVR_ASSERT(psLMAllocArrayData->uiAllocSize != 0);
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[0],
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ pcKernelPointer = pvMapping;
+ pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvMapping);
+
+ uiBytesCopied = uiBufSz;
+ }
+ *puiNumBytes = uiBytesCopied;
+ return PVRSRV_OK;
+e0:
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
+
+static void ReadLocalMem(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize)
+{
+ /* NOTE: 'CachedMemCopy' means the operating system's default memcpy, which
+ * we *assume* will be faster for LMA and does not need the special
+ * device-memory copy handling required on some architectures (e.g. ARM64).
+ */
+ OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ return CopyBytesLocalMem(pvPriv,
+ uiOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes,
+ ReadLocalMem);
+}
+
+static void WriteLocalMem(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize)
+{
+ /* NOTE: 'CachedMemCopy' means the operating system's default memcpy, which
+ * we *assume* will be faster for LMA and does not need the special
+ * device-memory copy handling required on some architectures (e.g. ARM64).
+ */
+ OSCachedMemCopy(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ return CopyBytesLocalMem(pvPriv,
+ uiOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes,
+ WriteLocalMem);
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemLocalMem
+@Description Changes the sparse mapping by allocating and freeing pages.
+ It also updates the corresponding GPU mappings.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ IMG_UINT32 ui32AdtnlAllocPages = 0;
+ IMG_UINT32 ui32AdtnlFreePages = 0;
+ IMG_UINT32 ui32CommonRequstCount = 0;
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32Index = 0;
+ IMG_UINT32 uiAllocpgidx;
+ IMG_UINT32 uiFreepgidx;
+
+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+ IMG_DEV_PHYADDR sPhyAddr;
+
+#if defined(DEBUG)
+ IMG_BOOL bPoisonFail = IMG_FALSE;
+ IMG_BOOL bZeroFail = IMG_FALSE;
+#endif
+
+ /* Fetch the Page table array represented by the PMR */
+ IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+
+ /* The incoming request is classified into two operations independent of
+ * each other: alloc & free pages.
+ * These operations can be combined with two mapping operations as well
+ * which are GPU & CPU space mappings.
+ *
+ * From the alloc and free page requests, the net amount of pages to be
+ * allocated or freed is computed. Pages that were requested to be freed
+ * will be reused to fulfil alloc requests.
+ *
+ * The order of operations is:
+ * 1. Allocate new pages from the OS
+ * 2. Move the free pages from free request to alloc positions.
+ * 3. Free the rest of the pages not used for alloc
+ *
+ * Alloc parameters are validated at the time of allocation
+ * and any error will be handled then. */
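+
+ /*
+ * Worked example (illustrative; the counts are assumed): with
+ * ui32AllocPageCount = 3, ui32FreePageCount = 2 and both
+ * SPARSE_RESIZE_ALLOC and SPARSE_RESIZE_FREE requested,
+ *
+ * ui32CommonRequstCount = min(3, 2) = 2
+ * ui32AdtnlAllocPages = 3 - 2 = 1
+ * ui32AdtnlFreePages = 2 - 2 = 0
+ *
+ * i.e. one page is newly allocated, two of the pages nominated for
+ * freeing are re-used to back the remaining alloc indices, and no
+ * pages are returned to the arena.
+ */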
+
+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+ {
+ ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
+ ui32FreePageCount : ui32AllocPageCount;
+
+ PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+ }
+
+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+ {
+ ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount;
+ }
+ else
+ {
+ ui32AllocPageCount = 0;
+ }
+
+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+ {
+ ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount;
+ }
+ else
+ {
+ ui32FreePageCount = 0;
+ }
+
+ if (0 == (ui32CommonRequstCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+
+ {
+ /* Validate the free page indices */
+ if (ui32FreePageCount)
+ {
+ if (NULL != pai32FreeIndices)
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+ if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (INVALID_PAGE == psPageArray[uiFreepgidx].uiAddr)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+ }
+
+ /* Validate the alloc page indices that will be satisfied by re-using freed pages */
+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Loop];
+ if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ if ((INVALID_PAGE != psPageArray[uiAllocpgidx].uiAddr) ||
+ (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ else
+ {
+ if ((INVALID_PAGE == psPageArray[uiAllocpgidx].uiAddr) ||
+ (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ }
+
+
+ ui32Loop = 0;
+
+ /* Allocate new pages */
+ if (0 != ui32AdtnlAllocPages)
+ {
+ /* Say how many pages to allocate */
+ psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+
+ eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Additional page allocation failed",
+ __FUNCTION__));
+ goto e0;
+ }
+
+ /* Mark the corresponding pages of translation table as valid */
+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+ {
+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+ }
+ }
+
+ ui32Index = ui32Loop;
+
+ /* Move the corresponding free pages to alloc request */
+ for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
+ {
+
+ uiAllocpgidx = pai32AllocIndices[ui32Index];
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+ sPhyAddr = psPageArray[uiAllocpgidx];
+ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+ /* Is SPARSE_REMAP_MEM used in any real-world scenario, or should it be
+ * turned into a debug-only feature? The condition check should be hoisted
+ * out of this loop; deferred pending further analysis. */
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE;
+ }
+ else
+ {
+ psPageArray[uiFreepgidx] = sPhyAddr;
+ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ }
+
+ /* Be sure to honour the attributes associated with the allocation
+ * such as zeroing, poisoning etc. */
+ if (psPMRPageArrayData->bPoisonOnAlloc)
+ {
+ eError = _PoisonAlloc(psPMRPageArrayData->psDevNode,
+ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+ psPMRPageArrayData->bFwLocalAlloc,
+ psPMRPageArrayData->uiAllocSize,
+ _AllocPoison,
+ _AllocPoisonSize);
+
+ /* Consider this as a soft failure and go ahead but log error to kernel log */
+ if (eError != PVRSRV_OK)
+ {
+#if defined(DEBUG)
+ bPoisonFail = IMG_TRUE;
+#endif
+ }
+ }
+ else
+ {
+ if (psPMRPageArrayData->bZeroOnAlloc)
+ {
+ eError = _ZeroAlloc(psPMRPageArrayData->psDevNode,
+ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+ psPMRPageArrayData->bFwLocalAlloc,
+ psPMRPageArrayData->uiAllocSize);
+ /* Consider this as a soft failure and go ahead but log error to kernel log */
+ if (eError != PVRSRV_OK)
+ {
+#if defined(DEBUG)
+ /*Don't think we need to zero any pages further*/
+ bZeroFail = IMG_TRUE;
+#endif
+ }
+ }
+ }
+ }
+
+ /*Free the additional free pages */
+ if (0 != ui32AdtnlFreePages)
+ {
+ ui32Index = ui32Loop;
+ _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+ ui32Loop = 0;
+
+ while(ui32Loop++ < ui32AdtnlFreePages)
+ {
+ /*Set the corresponding mapping table entry to invalid address */
+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
+ }
+ }
+
+ }
+
+#if defined(DEBUG)
+ if(IMG_TRUE == bPoisonFail)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __FUNCTION__));
+ }
+
+ if(IMG_TRUE == bZeroFail)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __FUNCTION__));
+ }
+#endif
+
+ /* Update the PMR memory holding information */
+ eError = PVRSRV_OK;
+
+e0:
+ return eError;
+
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemCPUMapLocalMem
+@Description Changes the CPU mappings to reflect the updated sparse layout.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ IMG_DEV_PHYADDR *psPageArray;
+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+ uintptr_t sCpuVABase = sCpuVAddrBase;
+ IMG_CPU_PHYADDR sCpuAddrPtr;
+ IMG_BOOL bValid;
+
+ /*Get the base address of the heap */
+ PMR_CpuPhysAddr(psPMR,
+ psPMRPageArrayData->uiLog2AllocSize,
+ 1,
+ 0, /* offset zero here means the first page in the PMR */
+ &sCpuAddrPtr,
+ &bValid);
+
+ /* The CPU physical base of the heap is obtained by subtracting the device
+ * physical address of the first page: CPU physical address of any page =
+ * heap base + that page's device physical address */
+ sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
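+ /* e.g. (values assumed for illustration): if the CPU physical address of
+ * the first page is 0xB0001000 and its device physical address is
+ * 0x00001000, the base becomes 0xB0000000, and a page at device address
+ * 0x00042000 maps to CPU physical 0xB0042000. */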
+ psPageArray = psPMRPageArrayData->pasDevPAddr;
+
+ return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+ sCpuVABase,
+ sCpuAddrPtr,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ IMG_TRUE);
+}
+
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+ /* pfnLockPhysAddresses */
+ &PMRLockSysPhysAddressesLocalMem,
+ /* pfnUnlockPhysAddresses */
+ &PMRUnlockSysPhysAddressesLocalMem,
+ /* pfnDevPhysAddr */
+ &PMRSysPhysAddrLocalMem,
+ /* pfnAcquireKernelMappingData */
+ &PMRAcquireKernelMappingDataLocalMem,
+ /* pfnReleaseKernelMappingData */
+ &PMRReleaseKernelMappingDataLocalMem,
+#if defined(INTEGRITY_OS)
+ /* pfnMapMemoryObject */
+ NULL,
+ /* pfnUnmapMemoryObject */
+ NULL,
+#endif
+ /* pfnReadBytes */
+ &PMRReadBytesLocalMem,
+ /* pfnWriteBytes */
+ &PMRWriteBytesLocalMem,
+ /* .pfnUnpinMem */
+ NULL,
+ /* .pfnPinMem */
+ NULL,
+ /* pfnChangeSparseMem*/
+ &PMRChangeSparseMemLocalMem,
+ /* pfnChangeSparseMemCPUMap */
+ &PMRChangeSparseMemCPUMapLocalMem,
+ /* pfnMMap */
+ NULL,
+ /* pfnFinalize */
+ &PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ PMR *psPMR = NULL;
+ PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
+ PMR_FLAGS_T uiPMRFlags;
+ PHYS_HEAP *psPhysHeap;
+ IMG_BOOL bZero;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bContig;
+ IMG_BOOL bFwLocalAlloc;
+ IMG_BOOL bCpuLocalAlloc;
+
+ if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) &&
+ (ui32NumPhysChunks == ui32NumVirtChunks))
+ {
+ bContig = IMG_TRUE;
+ }
+ else
+ {
+ bContig = IMG_FALSE;
+ }
+
+ bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+ if (bZero && bPoisonOnAlloc)
+ {
+ /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errorOnParam;
+ }
+
+ /* Silently round up the alignment/page size if the request was less than
+ PAGE_SHIFT, because it is never harmful for memory to be _more_
+ contiguous than was desired */
+
+ uiLog2PageSize = OSGetPageShift() > uiLog2PageSize
+ ? OSGetPageShift()
+ : uiLog2PageSize;
+
+ /* In case we have a non-sparse allocation tolerate bad requests and round up.
+ * For sparse allocations the users have to make sure to meet the right
+ * requirements. */
+ if (ui32NumPhysChunks == ui32NumVirtChunks &&
+ ui32NumVirtChunks == 1)
+ {
+ /* Round up allocation size to at least a full OSGetPageSize() */
+ uiSize = PVR_ALIGN(uiSize, OSGetPageSize());
+ uiChunkSize = uiSize;
+ }
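+ /* Example of the rounding above (an assumed 4 KB OS page size): a request
+ * of uiSize = 0x1001 bytes is rounded up to 0x2000 and uiChunkSize
+ * follows suit. */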
+
+ if (bFwLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else if (bCpuLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ /* Create Array structure that holds the physical pages */
+ eError = _AllocLMPageArray(psDevNode,
+ uiChunkSize * ui32NumVirtChunks,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2PageSize,
+ bZero,
+ bPoisonOnAlloc,
+ bPoisonOnFree,
+ bContig,
+ bOnDemand,
+ bFwLocalAlloc,
+ psPhysHeap,
+ uiFlags,
+ &psPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPageArray;
+ }
+
+
+ if (!bOnDemand)
+ {
+ /* Allocate the physical pages */
+ eError = _AllocLMPages(psPrivData,pui32MappingTable);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPages;
+ }
+ }
+
+ /* In this instance, we simply pass flags straight through.
+
+ Generically, uiFlags can include things that control the PMR
+ factory, but we don't need any such thing (at the time of
+ writing!), and our caller specifies all PMR flags so we don't
+ need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+ /* check no significant bits were lost in cast due to different
+ bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ if (bOnDemand)
+ {
+ PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
+ }
+
+
+ eError = PMRCreatePMR(psDevNode,
+ psPhysHeap,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2PageSize,
+ uiPMRFlags,
+ pszAnnotation,
+ &_sPMRLMAFuncTab,
+ psPrivData,
+ PMR_TYPE_LMA,
+ &psPMR,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewLocalRamBackedPMR: Unable to create PMR (status=%d)", eError));
+ goto errorOnCreate;
+ }
+
+ *ppsPMRPtr = psPMR;
+ return PVRSRV_OK;
+
+errorOnCreate:
+ if(!bOnDemand && psPrivData->bHasLMPages)
+ {
+ eError2 = _FreeLMPages(psPrivData, NULL,0);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+errorOnAllocPages:
+ eError2 = _FreeLMPageArray(psPrivData);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+
+struct PidOSidCouplingList
+{
+ IMG_PID pId;
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+
+ struct PidOSidCouplingList *psNext;
+};
+typedef struct PidOSidCouplingList PidOSidCouplingList;
+
+static PidOSidCouplingList *psPidOSidHead=NULL;
+static PidOSidCouplingList *psPidOSidTail=NULL;
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+ PidOSidCouplingList *psTmp;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg/ IsSecure) (%d/ %d/ %d/ %s) into list",
+ pId,ui32OSid, ui32OSidReg, (bOSidAxiProt)?"Yes":"No"));
+
+ psTmp=OSAllocMem(sizeof(PidOSidCouplingList));
+
+ if (psTmp==NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally.\n"));
+ return ;
+ }
+
+ psTmp->pId=pId;
+ psTmp->ui32OSid=ui32OSid;
+ psTmp->ui32OSidReg=ui32OSidReg;
+ psTmp->bOSidAxiProt = bOSidAxiProt;
+
+ psTmp->psNext=NULL;
+ if (psPidOSidHead==NULL)
+ {
+ psPidOSidHead=psTmp;
+ psPidOSidTail=psTmp;
+ }
+ else
+ {
+ psPidOSidTail->psNext=psTmp;
+ psPidOSidTail=psTmp;
+ }
+
+ return ;
+}
+
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+ PidOSidCouplingList *psTmp;
+
+ for (psTmp=psPidOSidHead;psTmp!=NULL;psTmp=psTmp->psNext)
+ {
+ if (psTmp->pId==pId)
+ {
+ (*pui32OSid) = psTmp->ui32OSid;
+ (*pui32OSidReg) = psTmp->ui32OSidReg;
+ (*pbOSidAxiProt) = psTmp->bOSidAxiProt;
+
+ return ;
+ }
+ }
+
+ (*pui32OSid)=0;
+ (*pui32OSidReg)=0;
+ (*pbOSidAxiProt) = IMG_FALSE;
+
+ return ;
+}
+
+void RemovePidOSidCoupling(IMG_PID pId)
+{
+ PidOSidCouplingList *psTmp, *psPrev=NULL;
+
+ for (psTmp=psPidOSidHead; psTmp!=NULL; psTmp=psTmp->psNext)
+ {
+ if (psTmp->pId==pId) break;
+ psPrev=psTmp;
+ }
+
+ if (psTmp==NULL)
+ {
+ return ;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list",psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
+
+ if (psTmp==psPidOSidHead)
+ {
+ if (psPidOSidHead->psNext==NULL)
+ {
+ psPidOSidHead=NULL;
+ psPidOSidTail=NULL;
+ OSFreeMem(psTmp);
+
+ return ;
+ }
+
+ psPidOSidHead=psPidOSidHead->psNext;
+ OSFreeMem(psTmp);
+ return ;
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: freed %u bytes of local memory for PMR @0x%p", __func__, (ui32PagesFreed * uiAllocSize), psPageArrayData));
+
+ if (psPrev==NULL) return ;
+
+ psPrev->psNext=psTmp->psNext;
+ if (psTmp==psPidOSidTail)
+ {
+ psPidOSidTail=psPrev;
+ }
+
+ OSFreeMem(psTmp);
+
+ return ;
+}
+
+#endif
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Header for local card memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for local card memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_LMA_H_
+#define _SRVSRV_PHYSMEM_LMA_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Define some helper list functions for the virtualization validation code
+ */
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+void RemovePidOSidCoupling(IMG_PID pId);
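+
+/*
+ * Minimal usage sketch (illustrative only; the PID/OSid values and call
+ * sites are assumptions, not part of this interface):
+ *
+ * IMG_UINT32 ui32OSid, ui32OSidReg;
+ * IMG_BOOL bAxiProt;
+ * IMG_PID pId = OSGetCurrentClientProcessIDKM();
+ *
+ * InsertPidOSidsCoupling(pId, 2, 2, IMG_FALSE);
+ * RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bAxiProt);
+ * // ui32OSid is now 2; PIDs with no coupling fall back to OSid 0
+ * RemovePidOSidCoupling(pId);
+ */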
+#endif
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PMR implementation of OS derived physical memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is
+ responsible for an implementation of the "PMR"
+ abstraction. This interface is for the
+ PhysmemNewOSRamBackedPMR() "PMR Factory" which is
+ responsible for claiming chunks of memory (in
+ particular physically contiguous quanta) from the
+ Operating System.
+
+ As such, this interface will be implemented on a
+ Per-OS basis, in the "env" directory for that system.
+ A dummy implementation is available in
+ physmem_osmem_dummy.c for operating systems that
+ cannot, or do not wish to, offer this functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _SRVSRV_PHYSMEM_OSMEM_H_
+#define _SRVSRV_PHYSMEM_OSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function PhysmemNewOSRamBackedPMR
+@Description Rogue Services will call this function to allocate GPU device
+ memory from the PMR factory supported by the OS DDK port. This
+ factory typically obtains physical memory from the kernel/OS
+ API that allocates memory from the default heap of shared system
+ memory available on the platform. The allocated memory must be
+ page-aligned and be a whole number of pages.
+ After allocating the required memory, the implementation must
+ then call PMRCreatePMR() to obtain the PMR structure that
+ describes this allocation to the upper layers of the Services
+ memory management sub-system.
+ NB. Implementation of this function is mandatory. If shared
+ system memory is not to be used in the OS port then the
+ implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input psDevNode the device node
+@Input uiSize the size of the allocation
+ (must be a multiple of page size)
+@Input uiChunkSize when sparse allocations are requested,
+ this is the allocated chunk size.
+ For regular allocations, this will be
+ the same as uiSize.
+ (must be a multiple of page size)
+@Input ui32NumPhysChunks when sparse allocations are requested,
+ this is the number of physical chunks
+ to be allocated.
+ For regular allocations, this will be 1.
+@Input ui32NumVirtChunks when sparse allocations are requested,
+ this is the number of virtual chunks
+ covering the sparse allocation.
+ For regular allocations, this will be 1.
+@Input pui32MappingTable when sparse allocations are requested,
+ this is the list of the indices of
+ each physically-backed virtual chunk
+ For regular allocations, this will
+ be NULL.
+@Input uiLog2PageSize the physical pagesize in log2(bytes).
+@Input uiFlags the allocation flags.
+@Input pszAnnotation string describing the PMR (for debug).
+ This should be passed into the function
+ PMRCreatePMR().
+@Output ppsPMROut pointer to the PMR created for the
+ new allocation
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMROut);
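+
+/* Call sketch for a regular (non-sparse) allocation following the contract
+ * documented above. The size, log2 page size and annotation string below are
+ * illustrative assumptions only, and the example is compiled out.
+ */
+#if 0
+static PVRSRV_ERROR ExampleOSRamAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMR)
+{
+ IMG_DEVMEM_SIZE_T uiSize = 1 * 1024 * 1024; /* whole number of pages */
+
+ /* Regular allocation: one physical chunk backing one virtual chunk,
+ * chunk size equal to the full size and no mapping table. */
+ return PhysmemNewOSRamBackedPMR(psDevNode,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ NULL,
+ 12, /* log2(4 KB) */
+ uiFlags,
+ "ExampleOSRamAlloc",
+ ppsPMR);
+}
+#endif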
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_OSMEM_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Implementation of PMR functions for OS managed memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for physical memory borrowed
+ from that normally managed by the operating system.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/io.h>
+#if defined(CONFIG_X86)
+#include <asm/cacheflush.h>
+#endif
+
+/* include5/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "devicemem_server_utils.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+#include "physmem_osmem_linux.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
+#else
+/* split_page not available on older kernels */
+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
+static IMG_UINT32 g_uiMaxOrder = 0;
+#endif
+
+/* Get/Set/Mask out alloc_page/dma_alloc flag */
+#define DMA_GET_ADDR(x) (((x) >> 1) << 1)
+#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01))
+#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01))
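+
+/*
+ * Example of how the tag macros compose (illustrative; the address value is
+ * assumed). Page-aligned bus addresses always have bit 0 clear, so it can be
+ * borrowed to mark "allocated via alloc_pages" rather than the DMA API:
+ *
+ * dma_addr_t sAddr = 0x80042000;
+ * sAddr = DMA_SET_ALLOCPG_ADDR(sAddr); -- 0x80042001
+ * DMA_IS_ALLOCPG_ADDR(sAddr) -- non-zero
+ * DMA_GET_ADDR(sAddr) -- 0x80042000 again
+ */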
+
+typedef struct _PMR_OSPAGEARRAY_DATA_ {
+ /* Device for which this allocation has been made */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /*
+ * iNumPagesAllocated:
+ * Number of pages allocated in this PMR so far.
+ * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
+ */
+ IMG_INT32 iNumPagesAllocated;
+
+ /*
+ * uiTotalNumPages:
+ * Total number of pages supported by this PMR (currently fixed, due to the
+ * fixed page table array size). These are "pages" in the device sense,
+ * i.e. possibly macro/compound/higher-order pages.
+ */
+ IMG_UINT32 uiTotalNumPages;
+
+ /*
+ uiLog2DevPageSize:
+
+ size of each "page" -- this would normally be the same as
+ PAGE_SHIFT, but we support the idea that we may allocate pages
+ in larger chunks for better contiguity, using order>0 in the
+ call to alloc_pages()
+ */
+ IMG_UINT32 uiLog2DevPageSize;
+
+ /*
+ For non DMA/CMA allocation, pagearray references the pages
+ thus allocated; one entry per compound page when compound
+ pages are used. In addition, for DMA/CMA allocations, we
+ track the returned cpu virtual and device bus address.
+ */
+ struct page **pagearray;
+ dma_addr_t *dmaphysarray;
+ void **dmavirtarray;
+
+
+ /*
+ Record at alloc time whether zeroing/poisoning is required on allocation,
+ whether poisoning is required when the PMR is freed, and related flags
+ (deferred allocation, unpinned state, CMA-backed memory).
+ */
+ IMG_BOOL bZero;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bUnpinned; /* Should be protected by page pool lock */
+ IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */
+
+ /*
+ The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
+ flag, advising us to do cache maintenance on behalf of the caller.
+ NOTE: For DMA/CMA allocations, memory is _always_ uncached.
+
+ Boolean used to track if we need to revert the cache attributes
+ of the pages used in this allocation. Depends on OS/architecture.
+ */
+ IMG_UINT32 ui32CPUCacheFlags;
+ IMG_BOOL bUnsetMemoryType;
+} PMR_OSPAGEARRAY_DATA;
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+ size_t alloc_size,
+ IMG_UINT32 uiOrder,
+ void *virt_addr,
+ dma_addr_t dev_addr,
+ struct page *psPage);
+
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+ IMG_BOOL bUnsetMemoryType,
+ struct page *psPage);
+
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount);
+
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+ IMG_UINT32 *puiPagesFreed);
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush);
+
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush,
+ IMG_UINT32 ui32CPUCacheFlags);
+
+/* A struct for our page pool holding an array of pages.
+ * We always put units of page arrays to the pool but are
+ * able to take individual pages */
+typedef struct
+{
+ /* Linkage for page pool LRU list */
+ struct list_head sPagePoolItem;
+
+ /* How many items are still in the page array */
+ IMG_UINT32 uiItemsRemaining;
+ struct page **ppsPageArray;
+
+} LinuxPagePoolEntry;
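+
+/*
+ * Hedged sketch of the pool bookkeeping implied by this structure (the real
+ * insert/take helpers appear later in this file; psDonatedArray, uiArrayLen
+ * and psPoolHead below are made-up names):
+ *
+ * LinuxPagePoolEntry *psEntry =
+ * kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+ * psEntry->ppsPageArray = psDonatedArray;
+ * psEntry->uiItemsRemaining = uiArrayLen;
+ * list_add_tail(&psEntry->sPagePoolItem, psPoolHead);
+ * g_ui32PagePoolEntryCount += uiArrayLen;
+ *
+ * A consumer may take fewer than uiItemsRemaining pages from an entry and
+ * simply decrement the counter rather than removing the whole entry.
+ */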
+
+/* A struct for the unpinned items */
+typedef struct
+{
+ struct list_head sUnpinPoolItem;
+ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
+} LinuxUnpinEntry;
+
+/* Caches to hold page pool and page array structures */
+static struct kmem_cache *g_psLinuxPagePoolCache = NULL;
+static struct kmem_cache *g_psLinuxPageArray = NULL;
+
+/* Track what is live */
+static IMG_UINT32 g_ui32UnpinPageCount = 0;
+static IMG_UINT32 g_ui32PagePoolEntryCount = 0;
+
+/* Pool entry limits */
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent= PVR_LINUX_PHYSMEM_MAX_POOL_PAGES / 20;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = 0;
+static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent = 0;
+#endif
+
+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = 0;
+#endif
+
+#if defined(CONFIG_X86)
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 3
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+ PVRSRV_MEMALLOCFLAG_CPU_CACHED,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+};
+#else
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+ PVRSRV_MEMALLOCFLAG_CPU_CACHED,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+};
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+/* List holding the page array pointers: */
+static LIST_HEAD(g_sPagePoolList_WB);
+static LIST_HEAD(g_sPagePoolList_WC);
+static LIST_HEAD(g_sPagePoolList_UC);
+static LIST_HEAD(g_sUnpinList);
+
+static inline void
+_PagePoolLock(void)
+{
+ mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+ return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+ mutex_unlock(&g_sPagePoolMutex);
+}
+
+static PVRSRV_ERROR
+_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+ LinuxUnpinEntry *psUnpinEntry;
+
+ psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
+ if (!psUnpinEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSAllocMem failed. Cannot add entry to unpin list.",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
+
+ /* Add into pool that the shrinker can access easily*/
+ list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
+
+ g_ui32UnpinPageCount += psOSPageArrayData->iNumPagesAllocated;
+
+ return PVRSRV_OK;
+}
+
+static void
+_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+
+ /* Remove from pool */
+ list_for_each_entry_safe(psUnpinEntry,
+ psTempUnpinEntry,
+ &g_sUnpinList,
+ sUnpinPoolItem)
+ {
+ if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
+ {
+ list_del(&psUnpinEntry->sUnpinPoolItem);
+ break;
+ }
+ }
+
+ OSFreeMem(psUnpinEntry);
+
+ g_ui32UnpinPageCount -= psOSPageArrayData->iNumPagesAllocated;
+}
+
+static inline IMG_BOOL
+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
+ struct list_head **ppsPoolHead)
+{
+ switch(PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86)
+ /*
+ For x86 we need to keep different lists for uncached
+ and write-combined as we must always honour the PAT
+ setting which cares about this difference.
+ */
+
+ *ppsPoolHead = &g_sPagePoolList_WC;
+ break;
+#endif
+
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ *ppsPoolHead = &g_sPagePoolList_UC;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ *ppsPoolHead = &g_sPagePoolList_WB;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pages from pool, "
+ "unknown CPU caching mode.", __func__));
+ return IMG_FALSE;
+ }
+ return IMG_TRUE;
+}
+
+static struct shrinker g_sShrinker;
+
+/* Returning the number of pages that still reside in the page pool.
+ * Do not count excess pages that will be freed by the defer free thread. */
+static unsigned long
+_GetNumberOfPagesInPoolUnlocked(void)
+{
+ unsigned int uiEntryCount;
+
+ uiEntryCount = (g_ui32PagePoolEntryCount > g_ui32PagePoolMaxEntries) ? g_ui32PagePoolMaxEntries : g_ui32PagePoolEntryCount;
+ return uiEntryCount + g_ui32UnpinPageCount;
+}
+
+/* Linux shrinker function that informs the OS how many pages we are caching
+ * and that it is therefore able to reclaim. */
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ int remain;
+
+ PVR_ASSERT(psShrinker == &g_sShrinker);
+ (void)psShrinker;
+ (void)psShrinkControl;
+
+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+ if (_PagePoolTrylock() == 0)
+ return 0;
+ remain = _GetNumberOfPagesInPoolUnlocked();
+ _PagePoolUnlock();
+
+ return remain;
+}
+
+/* Linux shrinker function to reclaim the pages from our page pool */
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+ unsigned long uSurplus = 0;
+ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+ IMG_UINT32 uiPagesFreed;
+
+ PVR_ASSERT(psShrinker == &g_sShrinker);
+ (void)psShrinker;
+
+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+ if (_PagePoolTrylock() == 0)
+ return SHRINK_STOP;
+
+ _FreePagesFromPoolUnlocked(uNumToScan,
+ &uiPagesFreed);
+ uNumToScan -= uiPagesFreed;
+
+ if (uNumToScan == 0)
+ {
+ goto e_exit;
+ }
+
+ /* Free unpinned memory, starting with LRU entries */
+ list_for_each_entry_safe(psUnpinEntry,
+ psTempUnpinEntry,
+ &g_sUnpinList,
+ sUnpinPoolItem)
+ {
+ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
+ IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumPages > psPageArrayDataPtr->iNumPagesAllocated)?
+ psPageArrayDataPtr->iNumPagesAllocated:psPageArrayDataPtr->uiTotalNumPages;
+ PVRSRV_ERROR eError;
+
+ /* Free associated pages */
+ eError = _FreeOSPages(psPageArrayDataPtr,
+ NULL,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ eError));
+ goto e_exit;
+ }
+
+ /* Remove item from pool */
+ list_del(&psUnpinEntry->sUnpinPoolItem);
+
+ g_ui32UnpinPageCount -= uiNumPages;
+
+ /* Check if there is more to free or if we already surpassed the limit */
+ if (uiNumPages < uNumToScan)
+ {
+ uNumToScan -= uiNumPages;
+
+ }
+ else if (uiNumPages > uNumToScan)
+ {
+ uSurplus += uiNumPages - uNumToScan;
+ uNumToScan = 0;
+ goto e_exit;
+ }
+ else
+ {
+ uNumToScan -= uiNumPages;
+ goto e_exit;
+ }
+ }
+
+e_exit:
+ if (list_empty(&g_sPagePoolList_WC) &&
+ list_empty(&g_sPagePoolList_UC) &&
+ list_empty(&g_sPagePoolList_WB))
+ {
+ PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
+ }
+ if (list_empty(&g_sUnpinList))
+ {
+ PVR_ASSERT(g_ui32UnpinPageCount == 0);
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+ {
+ int remain;
+ remain = _GetNumberOfPagesInPoolUnlocked();
+ _PagePoolUnlock();
+ return remain;
+ }
+#else
+ /* Returning the number of pages freed during the scan */
+ _PagePoolUnlock();
+ return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ if (psShrinkControl->nr_to_scan != 0)
+ {
+ return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+ }
+ else
+ {
+ /* No pages are being reclaimed so just return the page count */
+ return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+ }
+}
+
+static struct shrinker g_sShrinker =
+{
+ .shrink = _ShrinkPagePool,
+ .seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+ .count_objects = _CountObjectsInPagePool,
+ .scan_objects = _ScanObjectsInPagePool,
+ .seeks = DEFAULT_SEEKS
+};
+#endif
+
+/* Register the shrinker so Linux can reclaim cached pages */
+void LinuxInitPhysmem(void)
+{
+ g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
+
+ _PagePoolLock();
+ g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
+ if (g_psLinuxPagePoolCache)
+ {
+ /* Only create the shrinker if we created the cache OK */
+ register_shrinker(&g_sShrinker);
+ }
+ _PagePoolUnlock();
+}
+
+/* Unregister the shrinker and remove all pages from the pool that are still left */
+void LinuxDeinitPhysmem(void)
+{
+ IMG_UINT32 uiPagesFreed;
+
+ _PagePoolLock();
+ if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount, &uiPagesFreed) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when deinitialising."));
+ PVR_ASSERT(0);
+ }
+
+ PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
+
+ /* Free the page cache */
+ kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+ unregister_shrinker(&g_sShrinker);
+ _PagePoolUnlock();
+
+ kmem_cache_destroy(g_psLinuxPageArray);
+}
+
+static void EnableOOMKiller(void)
+{
+ current->flags &= ~PF_DUMPCORE;
+}
+
+static void DisableOOMKiller(void)
+{
+ /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+ *
+ * As oom_killer_disable() is an inline, non-exported function, we
+ * can't use it from a modular driver. Furthermore, the OOM killer
+ * API doesn't look thread safe, whereas accessing `current' is.
+ */
+ WARN_ON(current->flags & PF_DUMPCORE);
+ current->flags |= PF_DUMPCORE;
+}
+
+/* Prints out the addresses in a page array for debugging purposes
+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
+static inline void
+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
+ IMG_UINT32 i;
+ if (pagearray)
+ {
+ printk("Array %p:\n", pagearray);
+ for (i = 0; i < uiPagesToPrint; i++)
+ {
+ printk("%p | ", (pagearray)[i]);
+ }
+ printk("\n");
+ }
+ else
+ {
+ printk("Array is NULL:\n");
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pagearray);
+ PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
+#endif
+}
+
+/* Debugging function that dumps out the number of pages for every
+ * page array that is currently in the page pool.
+ * Not defined by default. Define locally to activate feature: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
+static void
+_DumpPoolStructure(void)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 j;
+
+ printk("\n");
+ /* Empty all pools */
+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+ {
+
+ printk("pool = %u \n", j);
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead))
+ {
+ break;
+ }
+
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ printk("%u | ", psPagePoolEntry->uiItemsRemaining);
+ }
+ printk("\n");
+ }
+#endif
+}
+
+/* Takes excess pages from the pool with the pool lock held and then frees
+ * them without the pool lock being held.
+ * Designed to run in the deferred free thread. */
+static PVRSRV_ERROR
+_FreeExcessPagesFromPool(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ LIST_HEAD(sPagePoolFreeList);
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 i, j, uiPoolIdx;
+ static IMG_UINT8 uiPoolAccessRandomiser;
+ IMG_BOOL bDone = IMG_FALSE;
+
+ /* Make sure all pools are drained over time */
+ uiPoolAccessRandomiser++;
+
+ /* Empty all pools */
+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+ {
+ uiPoolIdx = (j + uiPoolAccessRandomiser) % PHYSMEM_OSMEM_NUM_OF_POOLS;
+
+ /* Just lock down to collect pool entries and unlock again before freeing them */
+ _PagePoolLock();
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[uiPoolIdx], &psPoolHead))
+ {
+ _PagePoolUnlock();
+ break;
+ }
+
+ /* Traverse pool in reverse order to remove items that exceeded
+ * the pool size first */
+ list_for_each_entry_safe_reverse(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ /* Go to free the pages if we collected enough */
+ if (g_ui32PagePoolEntryCount <= g_ui32PagePoolMaxEntries)
+ {
+ bDone = IMG_TRUE;
+ break;
+ }
+
+ /* Move item to free list so we can free it later without the pool lock */
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ list_add(&psPagePoolEntry->sPagePoolItem, &sPagePoolFreeList);
+
+ /* Update counters */
+ g_ui32PagePoolEntryCount -= psPagePoolEntry->uiItemsRemaining;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining);
+#endif
+ }
+
+ _PagePoolUnlock();
+
+
+ /* Free the pages that we removed from the pool */
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ &sPagePoolFreeList,
+ sPagePoolItem)
+ {
+#if defined(CONFIG_X86)
+ /* Set the correct page caching attributes on x86 */
+ if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[uiPoolIdx]))
+ {
+ int ret;
+ ret = set_pages_array_wb(psPagePoolEntry->ppsPageArray,
+ psPagePoolEntry->uiItemsRemaining);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+ goto e_exit;
+ }
+ }
+#endif
+ /* Free the actual pages */
+ for (i = 0; i < psPagePoolEntry->uiItemsRemaining; i++)
+ {
+ __free_pages(psPagePoolEntry->ppsPageArray[i], 0);
+ psPagePoolEntry->ppsPageArray[i] = NULL;
+ }
+
+ /* Free the pool entry and page array*/
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ }
+
+ /* Stop if all excess pages were removed */
+ if (bDone)
+ {
+ eError = PVRSRV_OK;
+ goto e_exit;
+ }
+
+ }
+
+e_exit:
+ _DumpPoolStructure();
+ return eError;
+}
+
+/* Free a certain number of pages from the page pool.
+ * Mainly used in error paths or at deinitialisation to
+ * empty the whole pool. */
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+ IMG_UINT32 *puiPagesFreed)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 i, j;
+
+ *puiPagesFreed = uiMaxPagesToFree;
+
+ /* Empty all pools */
+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+ {
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead))
+ {
+ break;
+ }
+
+ /* Free the pages and remove page arrays from the pool if they are exhausted */
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ IMG_UINT32 uiItemsToFree;
+ struct page **ppsPageArray;
+
+ /* Check if we are going to free the whole page array or just parts */
+ if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
+ {
+ uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
+ ppsPageArray = psPagePoolEntry->ppsPageArray;
+ }
+ else
+ {
+ uiItemsToFree = uiMaxPagesToFree;
+ ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
+ }
+
+#if defined(CONFIG_X86)
+ /* Set the correct page caching attributes on x86 */
+ if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
+ {
+ int ret;
+ ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+ goto e_exit;
+ }
+ }
+#endif
+
+ /* Free the actual pages */
+ for (i = 0; i < uiItemsToFree; i++)
+ {
+ __free_pages(ppsPageArray[i], 0);
+ ppsPageArray[i] = NULL;
+ }
+
+ /* Reduce counters */
+ uiMaxPagesToFree -= uiItemsToFree;
+ g_ui32PagePoolEntryCount -= uiItemsToFree;
+ psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
+#endif
+
+ /* If this pool entry is exhausted, delete it */
+ if (psPagePoolEntry->uiItemsRemaining == 0)
+ {
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ }
+
+ /* Return if we have all our pages */
+ if (uiMaxPagesToFree == 0)
+ {
+ goto e_exit;
+ }
+ }
+ }
+
+e_exit:
+ *puiPagesFreed -= uiMaxPagesToFree;
+ _DumpPoolStructure();
+ return eError;
+}
+
+/* Get a certain number of pages from the page pool and
+ * copy them directly into a given page array. */
+static void
+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 uiMaxNumPages,
+ struct page **ppsPageArray,
+ IMG_UINT32 *puiNumReceivedPages)
+{
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 i;
+
+ *puiNumReceivedPages = 0;
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
+ {
+ return;
+ }
+
+ /* Check if there are actually items in the list */
+ if (list_empty(psPoolHead))
+ {
+ return;
+ }
+
+ PVR_ASSERT(g_ui32PagePoolEntryCount > 0);
+
+ /* Receive pages from the pool */
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ /* Get the pages from this pool entry */
+ for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
+ {
+ ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
+ (*puiNumReceivedPages)++;
+ psPagePoolEntry->uiItemsRemaining--;
+ }
+
+ /* If this pool entry is exhausted, delete it */
+ if (psPagePoolEntry->uiItemsRemaining == 0)
+ {
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ }
+
+ /* Return if we have all our pages */
+ if (*puiNumReceivedPages == uiMaxNumPages)
+ {
+ goto exit_ok;
+ }
+ }
+
+exit_ok:
+
+ /* Update counters */
+ g_ui32PagePoolEntryCount -= *puiNumReceivedPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
+#endif
+
+ _DumpPoolStructure();
+ return;
+}
+
+/* When is it worth waiting for the page pool? */
+#define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL 64
+
+/* Same as _GetPagesFromPoolUnlocked but handles locking and
+ * checks first whether pages from the pool are a valid option. */
+static inline void
+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 uiPagesToAlloc,
+ IMG_UINT32 uiOrder,
+ IMG_BOOL bZero,
+ struct page **ppsPageArray,
+ IMG_UINT32 *puiPagesFromPool)
+{
+ /* The page pool stores only order-0 pages. If we need zeroed memory we
+ * allocate directly from the OS because letting the OS zero the pages
+ * is faster than zeroing pooled pages ourselves. */
+ if (uiOrder == 0 && !bZero)
+ {
+ if (uiPagesToAlloc < PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL)
+ {
+ /* For a request of only a few pages, just try to acquire the pool lock and skip the pool if it is contended */
+ if (_PagePoolTrylock() == 0)
+ {
+ return;
+ }
+ }
+ else
+ {
+ /* It is worth waiting if many pages were requested.
+ * Freeing an item to the pool is very fast and
+ * the defer free thread will release the lock regularly. */
+ _PagePoolLock();
+ }
+
+ _GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ ppsPageArray,
+ puiPagesFromPool);
+ _PagePoolUnlock();
+
+ /* Do cache maintenance so allocations from the pool can be
+ * considered clean */
+ if (PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) &&
+ PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags))
+ {
+ _ApplyCacheMaintenance(psDevNode,
+ ppsPageArray,
+ *puiPagesFromPool,
+ IMG_FALSE);
+ }
+ }
+
+ return;
+}
+
+/* Defer free function to remove excess pages from the page pool.
+ * We do not need the bridge lock for this function */
+static PVRSRV_ERROR
+_CleanupThread_FreePoolPages(void *pvData)
+{
+ PVRSRV_ERROR eError;
+
+ /* Free all that is necessary */
+ eError = _FreeExcessPagesFromPool();
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: _FreeExcessPagesFromPool failed", __func__));
+ goto e_exit;
+ }
+
+ OSFreeMem(pvData);
+
+e_exit:
+ return eError;
+}
+
+/* Signal the defer free thread that there are pages in the pool to be cleaned up.
+ * MUST NOT HOLD THE PAGE POOL LOCK! */
+static void
+_SignalDeferFree(void)
+{
+ PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
+ psCleanupThreadFn = OSAllocMem(sizeof(*psCleanupThreadFn));
+
+ if(!psCleanupThreadFn)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get memory for deferred page pool cleanup. "
+ "Trying to free pages immediately",
+ __FUNCTION__));
+ goto e_oom_exit;
+ }
+
+ psCleanupThreadFn->pfnFree = _CleanupThread_FreePoolPages;
+ psCleanupThreadFn->pvData = psCleanupThreadFn;
+ psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+ psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
+ /* We must not hold the pool lock when calling AddWork because it might call us back to
+ * free pooled pages directly when unloading the driver */
+ PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
+
+ return;
+
+e_oom_exit:
+ {
+ /* In case we are not able to signal the defer free thread
+ * we have to clean up the pool now. */
+ IMG_UINT32 uiPagesFreed;
+
+ _PagePoolLock();
+ if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount - g_ui32PagePoolMaxEntries,
+ &uiPagesFreed) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to free pooled pages!",
+ __FUNCTION__));
+ }
+ _PagePoolUnlock();
+
+ return;
+ }
+}
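+
+/* Deferred free flow: _PutPagesToPoolLocked() calls _SignalDeferFree() once
+ * the pool grows beyond g_ui32PagePoolMaxEntries plus a 5% margin; the
+ * cleanup thread then runs _CleanupThread_FreePoolPages(), which trims the
+ * pool back down to g_ui32PagePoolMaxEntries via _FreeExcessPagesFromPool()
+ * without holding the bridge lock. */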
+
+/* Moves a page array to the page pool.
+ *
+ * If this function succeeds, ppsPageArray becomes unusable and must be
+ * reallocated if the PMR_OSPAGEARRAY_DATA structure is reused.
+ * This function expects cached pages to no longer be resident in the CPU
+ * cache; invalidate them beforehand, ideally without holding the pool lock. */
+static IMG_BOOL
+_PutPagesToPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+ struct page **ppsPageArray,
+ IMG_UINT32 uiEntriesInArray)
+{
+ LinuxPagePoolEntry *psPagePoolEntry;
+ struct list_head *psPoolHead = NULL;
+
+ /* Check if there is still space in the pool */
+ if ( (g_ui32PagePoolEntryCount + uiEntriesInArray) >=
+ (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )
+ {
+ return IMG_FALSE;
+ }
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
+ {
+ return IMG_FALSE;
+ }
+
+ /* Fill the new pool entry structure and add it to the pool list */
+ psPagePoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+ if (psPagePoolEntry == NULL)
+ {
+ /* Without the pool entry metadata we cannot pool the pages,
+ * fall back to freeing them immediately */
+ return IMG_FALSE;
+ }
+
+ psPagePoolEntry->ppsPageArray = ppsPageArray;
+ psPagePoolEntry->uiItemsRemaining = uiEntriesInArray;
+
+ list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
+
+ /* Update counters */
+ g_ui32PagePoolEntryCount += uiEntriesInArray;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiEntriesInArray);
+#endif
+
+ _DumpPoolStructure();
+ return IMG_TRUE;
+}
+
+/* Minimum number of pages that will go to the pool; anything below this is freed directly */
+#define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL 16
+
+/* Same as _PutPagesToPoolUnlocked but handles locking and checks whether the pages are
+ * suitable to be stored in the page pool. */
+static inline IMG_BOOL
+_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
+ struct page **ppsPageArray,
+ IMG_BOOL bUnpinned,
+ IMG_UINT32 uiOrder,
+ IMG_UINT32 uiNumPages)
+{
+ if (uiOrder == 0 &&
+ !bUnpinned &&
+ uiNumPages >= PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL)
+ {
+ _PagePoolLock();
+
+ /* Try to quickly move page array to the pool */
+ if (_PutPagesToPoolUnlocked(ui32CPUCacheFlags,
+ ppsPageArray,
+ uiNumPages) )
+ {
+ if (g_ui32PagePoolEntryCount > (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxEntries_5Percent))
+ {
+ /* Signal defer free to clean up excess pages from pool.
+ * Allow a little excess before signalling to avoid oscillating behaviour */
+ _PagePoolUnlock();
+ _SignalDeferFree();
+ }
+ else
+ {
+ _PagePoolUnlock();
+ }
+
+ /* All done */
+ return IMG_TRUE;
+ }
+
+ /* Could not move pages to pool, continue and free them now */
+ _PagePoolUnlock();
+ }
+
+ return IMG_FALSE;
+}
+
+/* Get the GFP flags that we pass to the page allocator */
+static inline unsigned int
+_GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ struct device *psDev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
+ unsigned int gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+ if (*psDev->dma_mask == DMA_BIT_MASK(32))
+ {
+ /* Limit to 32 bit.
+ * Achieved by NOT setting __GFP_HIGHMEM for 32 bit systems and
+ * setting __GFP_DMA32 for 64 bit systems */
+ gfp_flags |= __GFP_DMA32;
+ }
+ else
+ {
+ /* If our system is able to handle large addresses use highmem */
+ gfp_flags |= __GFP_HIGHMEM;
+ }
+
+ if (psPageArrayData->bZero)
+ {
+ gfp_flags |= __GFP_ZERO;
+ }
+
+ return gfp_flags;
+}
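+
+/* For example, for a device with a 32-bit DMA mask and a zeroing request the
+ * flags above evaluate to
+ * GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_DMA32 | __GFP_ZERO. */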
+
+/* Poison a page of order uiOrder with the string taken from pacPoisonData */
+static void
+_PoisonPages(struct page *page,
+ IMG_UINT32 uiOrder,
+ const IMG_CHAR *pacPoisonData,
+ size_t uiPoisonSize)
+{
+ void *kvaddr;
+ IMG_UINT32 uiSrcByteIndex;
+ IMG_UINT32 uiDestByteIndex;
+ IMG_UINT32 uiSubPageIndex;
+ IMG_CHAR *pcDest;
+
+ uiSrcByteIndex = 0;
+ for (uiSubPageIndex = 0; uiSubPageIndex < (1U << uiOrder); uiSubPageIndex++)
+ {
+ kvaddr = kmap(page + uiSubPageIndex);
+ pcDest = kvaddr;
+
+ for(uiDestByteIndex=0; uiDestByteIndex<PAGE_SIZE; uiDestByteIndex++)
+ {
+ pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+ uiSrcByteIndex++;
+ if (uiSrcByteIndex == uiPoisonSize)
+ {
+ uiSrcByteIndex = 0;
+ }
+ }
+
+ flush_dcache_page(page + uiSubPageIndex);
+ kunmap(page + uiSubPageIndex);
+ }
+}
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
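+
+/* Note: the poison sizes above are the visible string lengths (7 and 11
+ * characters) and deliberately exclude the terminating NUL, so only the
+ * recognisable pattern is replicated across the poisoned pages. */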
+
+/* Allocate and initialise the structure to hold the metadata of the allocation */
+static PVRSRV_ERROR
+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 uiLog2DevPageSize,
+ IMG_BOOL bZero,
+ IMG_BOOL bIsCMA,
+ IMG_BOOL bPoisonOnAlloc,
+ IMG_BOOL bPoisonOnFree,
+ IMG_BOOL bOnDemand,
+ IMG_UINT32 ui32CPUCacheFlags,
+ PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
+ IMG_UINT32 uiNumOSPageSizeVirtPages;
+ IMG_UINT32 uiNumDevPageSizeVirtPages;
+ PMR_OSPAGEARRAY_DATA *psPageArrayData;
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+
+ /* Sanity check of the alloc size */
+ if (uiSize >= 0x1000000000ULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Do you really want 64GB of physical memory in one go? "
+ "This is likely a bug", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e_freed_none;
+ }
+
+ /* Check that we allocate the correct contiguity */
+ PVR_ASSERT(PAGE_SHIFT <= uiLog2DevPageSize);
+ if ((uiSize & ((1ULL << uiLog2DevPageSize) - 1)) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Allocation size " PMR_SIZE_FMTSPEC " is not multiple of page size 2^%u !",
+ uiSize,
+ uiLog2DevPageSize));
+
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto e_freed_none;
+ }
+
+ /* Use of cast below is justified by the assertion that follows to
+ prove that no significant bits have been truncated */
+ uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
+ PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
+ uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2DevPageSize - PAGE_SHIFT);
+
+ /* Allocate the struct to hold the metadata */
+ psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
+ if (psPageArrayData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OS refused the memory allocation for the private data.",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_freed_none;
+ }
+
+ /*
+ * Allocate the page array
+ *
+ * We avoid tracking this memory because this structure might go into the page pool.
+ * The OS can drain the pool asynchronously and when doing that we have to avoid
+ * any potential deadlocks.
+ *
+ * For example, if the process stats vmalloc hash table lock is held while
+ * the OOM-killer / shrinker path calls _ScanObjectsInPagePool(), the scan
+ * must not try to acquire the vmalloc hash table lock again.
+ */
+ psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->pagearray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_kmem_cache;
+ }
+ else
+ {
+ if (bIsCMA)
+ {
+ /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
+ psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->dmavirtarray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_pagearray;
+ }
+
+ psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->dmaphysarray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_cpuvirtaddrarray;
+ }
+ }
+ }
+
+ /* Init metadata */
+ psPageArrayData->psDevNode = psDevNode;
+ psPageArrayData->iNumPagesAllocated = 0;
+ psPageArrayData->uiTotalNumPages = uiNumOSPageSizeVirtPages;
+ psPageArrayData->uiLog2DevPageSize = uiLog2DevPageSize;
+ psPageArrayData->bZero = bZero;
+ psPageArrayData->bIsCMA = bIsCMA;
+ psPageArrayData->bOnDemand = bOnDemand;
+ psPageArrayData->bUnpinned = IMG_FALSE;
+ psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+ psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+
+ /* Indicate whether this is an allocation with the default caching attribute (i.e. cached) or not */
+ if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+ PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+ {
+ psPageArrayData->bUnsetMemoryType = IMG_TRUE;
+ }
+ else
+ {
+ psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+ }
+
+ *ppsPageArrayDataPtr = psPageArrayData;
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_cpuvirtaddrarray:
+ OSFreeMemNoStats(psPageArrayData->dmavirtarray);
+
+e_free_pagearray:
+ OSFreeMemNoStats(psPageArrayData->pagearray);
+
+e_free_kmem_cache:
+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OS refused the memory allocation for the page pointer table. "
+ "Did you ask for too much?",
+ __func__));
+
+e_freed_none:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+ IMG_UINT32 ui32Idx;
+
+ if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
+ {
+ /* May fail so fallback to range-based flush */
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
+ {
+ IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+ void *pvPageVAddr;
+
+ pvPageVAddr = kmap(ppsPage[ui32Idx]);
+ sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+ sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+ /* If we're zeroing, we need to make sure the cleared memory is pushed out
+ of the cache before the cache lines are invalidated */
+ if (bFlush)
+ {
+ OSFlushCPUCacheRangeKM(psDevNode,
+ pvPageVAddr,
+ pvPageVAddr + PAGE_SIZE,
+ sCPUPhysAddrStart,
+ sCPUPhysAddrEnd);
+ }
+ else
+ {
+ OSInvalidateCPUCacheRangeKM(psDevNode,
+ pvPageVAddr,
+ pvPageVAddr + PAGE_SIZE,
+ sCPUPhysAddrStart,
+ sCPUPhysAddrEnd);
+ }
+
+ kunmap(ppsPage[ui32Idx]);
+ }
+ }
+}
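+
+/* Note: above, a single global cache flush is attempted first once the dirty
+ * range exceeds PVR_DIRTY_BYTES_FLUSH_THRESHOLD; if that operation fails or
+ * is unavailable, the code falls back to per-page kmap() plus range-based
+ * flush/invalidate. */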
+
+/* Changes the caching attributes of pages on x86 systems and takes care of
+ * cache maintenance. This function is supposed to be called once for pages that
+ * came from alloc_pages().
+ *
+ * Flush/Invalidate pages in case the allocation is not cached. Necessary to
+ * remove pages from the cache that might be flushed later and corrupt memory. */
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush,
+ IMG_UINT32 ui32CPUCacheFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
+ IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
+ IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
+
+ if (ppsPage != NULL)
+ {
+#if defined (CONFIG_X86)
+ /* On x86 we have to set page cache attributes for non-cached pages.
+ * The call is implicitly taking care of all flushing/invalidating
+ * and therefore we can skip the usual cache maintenance after this. */
+ if (bCPUUncached || bCPUWriteCombine)
+ {
+ /* On x86, if we already have a mapping (e.g. low memory) we need to change the mode of
+ the current mapping before we map it ourselves */
+ int ret = IMG_FALSE;
+ PVR_UNREFERENCED_PARAMETER(bFlush);
+
+ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ ret = set_pages_array_uc(ppsPage, uiNumPages);
+ if (ret)
+ {
+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+ }
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ ret = set_pages_array_wc(ppsPage, uiNumPages);
+ if (ret)
+ {
+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+ }
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ break;
+
+ default:
+ break;
+ }
+ }
+ else
+#endif
+ /* Cache maintenance if:
+ * cached && (cleanFlag || bFlush)
+ * OR
+ * uncached || write-combine
+ */
+ if ( (bCPUCached && (PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags) || bFlush)) ||
+ bCPUUncached || bCPUWriteCombine )
+ {
+ /* We can be given pages which still remain in the cache.
+ In order to make sure that the data we write through our mappings
+ doesn't get overwritten by later cache evictions we invalidate the
+ pages that are given to us.
+
+ Note:
+ This still seems to be true if we request cold pages; they are just
+ less likely to be in the cache. */
+ _ApplyCacheMaintenance(psDevNode,
+ ppsPage,
+ uiNumPages,
+ bFlush);
+ }
+ }
+
+ return eError;
+}
+
+/* Same as _AllocOSPage except it uses DMA framework to perform allocation */
+static PVRSRV_ERROR
+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ unsigned int gfp_flags,
+ IMG_UINT32 ui32AllocOrder,
+ IMG_UINT32 ui32MinOrder,
+ IMG_UINT32 uiPageIndex)
+{
+ void *virt_addr;
+ struct page *page;
+ dma_addr_t bus_addr;
+ size_t alloc_size = PAGE_SIZE << ui32MinOrder;
+ PVR_UNREFERENCED_PARAMETER(ui32AllocOrder);
+ PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
+
+ DisableOOMKiller();
+ virt_addr = dma_alloc_coherent(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ alloc_size,
+ &bus_addr,
+ gfp_flags);
+ if (virt_addr == NULL)
+ {
+ /* The idea here is primarily to support older kernels with broken
+ or non-functioning DMA/CMA implementations (< Linux 3.4) and to
+ handle DMA/CMA allocation failures by attempting a normal page
+ allocation. dma_alloc_coherent() is expected to attempt this
+ internally before failing, but it does no harm to retry the
+ allocation ourselves */
+ page = alloc_pages(gfp_flags, ui32AllocOrder);
+ if (page)
+ {
+ /* Taint bus_addr as an alloc_pages address, needed when freeing.
+ Also acquire only the low-memory page address; this prevents
+ mapping possible high-memory pages into the kernel virtual
+ address space, which might exhaust the VMALLOC address space */
+ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+ virt_addr = page_address(page);
+ }
+ else
+ {
+ /* Re-enable the OOM killer before bailing out of the allocation */
+ EnableOOMKiller();
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+ else
+ {
+ page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+ }
+ EnableOOMKiller();
+
+ /* Convert OSPageSize-based index into DevicePageSize-based index */
+ psPageArrayData->dmavirtarray[uiPageIndex >> ui32MinOrder] = virt_addr;
+ psPageArrayData->dmaphysarray[uiPageIndex >> ui32MinOrder] = bus_addr;
+ psPageArrayData->pagearray[uiPageIndex >> ui32MinOrder] = page;
+
+ return PVRSRV_OK;
+}
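+
+/* Note: when the alloc_pages() fallback above is taken, the bus address is
+ * tagged with DMA_SET_ALLOCPG_ADDR() so that _FreeOSPage_CMA() can later
+ * select the matching free path via DMA_IS_ALLOCPG_ADDR(). */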
+
+/* Allocates a page of order uiAllocOrder and stores it in the page array of
+ * psPageArrayData at position uiPageIndex.
+ *
+ * If the order is higher than 0, the allocation is split into order-0 pages which
+ * are stored at positions uiPageIndex to uiPageIndex + (1 << uiAllocOrder) - 1. */
+static PVRSRV_ERROR
+_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ unsigned int gfp_flags,
+ IMG_UINT32 uiAllocOrder,
+ IMG_UINT32 uiMinOrder,
+ IMG_UINT32 uiPageIndex)
+{
+ struct page *psPage;
+ IMG_UINT32 ui32Count;
+
+ /* Allocate the page */
+ DisableOOMKiller();
+ psPage = alloc_pages(gfp_flags, uiAllocOrder);
+ EnableOOMKiller();
+
+ if (psPage == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ /* In case we need to, split the higher order page;
+ this should only be used for order-0 allocations
+ as higher order allocations should use DMA/CMA */
+ if (uiAllocOrder != 0)
+ {
+ split_page(psPage, uiAllocOrder);
+ }
+#endif
+
+ /* Store the page (or multiple split pages) in the page array */
+ for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
+ {
+ psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
+ }
+
+ return PVRSRV_OK;
+}
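+
+/* Note: split_page() above turns the high-order allocation into
+ * 1 << uiAllocOrder independent struct pages, which is what allows the page
+ * array to hold one pointer per OS page and free them individually later. */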
+
+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
+ *
+ * Firstly, to support device pages which are larger than OS pages. By asking the OS for 2^N
+ * order OS pages at a time we guarantee the device page is contiguous.
+ *
+ * Secondly for performance where we may ask for 2^N order pages to reduce the number
+ * of calls to alloc_pages, and thus reduce time for huge allocations.
+ *
+ * Regardless of the page order requested, we break allocations down so that individual OS pages can be tracked.
+ * The maximum order requested is increased if all max order allocations were successful.
+ * If any request fails we reduce the max order.
+ */
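+/* For example, with 16KB device pages on a 4KB-page kernel, ui32MinOrder is
+ * 14 - 12 = 2, so every device page requires a contiguous block of
+ * 2^2 = 4 OS pages from alloc_pages(). */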
+static PVRSRV_ERROR
+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiArrayIndex = 0;
+ IMG_UINT32 ui32Order;
+ IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
+ IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
+
+ IMG_UINT32 ui32NumPageReq;
+ IMG_UINT32 uiPagesToAlloc;
+ IMG_UINT32 uiPagesFromPool = 0;
+
+ unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
+ IMG_UINT32 ui32GfpFlags;
+ IMG_UINT32 ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
+
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ struct page **ppsPageAttributeArray = NULL;
+
+ uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
+
+ /* Try to get pages from the pool since it is faster;
+ the page pool currently only supports zero-order pages
+ thus currently excludes all DMA/CMA allocated memory */
+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+ psPageArrayData->ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ ui32MinOrder,
+ psPageArrayData->bZero,
+ ppsPageArray,
+ &uiPagesFromPool);
+
+ uiArrayIndex = uiPagesFromPool;
+
+ if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
+ { /* Small allocations: Ask for one device page at a time */
+ ui32Order = ui32MinOrder;
+ bIncreaseMaxOrder = IMG_FALSE;
+ }
+ else
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ /* Large zero-order or non-zero-order allocations: ask for
+ MAX(max-order, min-order) pages at a time; allocation
+ failures throttle this down towards min-order allocations */
+ ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
+#else
+ /* Because split_pages() is not available on older kernels
+ we cannot mix-and-match any-order pages in the PMR;
+ only same-order pages must be present in page array.
+ So we unconditionally force it to use ui32MinOrder on
+ these older kernels */
+ ui32Order = ui32MinOrder;
+#endif
+ }
+
+ /* Only let the allocation fail if we are asking for more contiguity than we actually need */
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ ui32NumPageReq = (1 << ui32Order);
+
+ while (uiArrayIndex < uiPagesToAlloc)
+ {
+ IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;
+
+ while (ui32NumPageReq > ui32PageRemain)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ /* The number of pages to request is larger than the number
+ remaining, so ask for less and never over-allocate */
+ ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
+#else
+ /* The number of pages to request is larger than the number
+ remaining, so do nothing and over-allocate, as we do not
+ support mixing page orders in the PMR page array on
+ older kernels (this simplifies the page free logic) */
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+ ui32NumPageReq = (1 << ui32Order);
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ }
+
+ if (psPageArrayData->bIsCMA)
+ {
+ /* As the DMA/CMA framework rounds-up request to the
+ next power-of-two, we request multiple uiMinOrder
+ pages to satisfy allocation request in order to
+ minimise wasting memory */
+ eError = _AllocOSPage_CMA(psPageArrayData,
+ ui32GfpFlags,
+ ui32Order,
+ ui32MinOrder,
+ uiArrayIndex);
+ }
+ else
+ {
+ /* Allocate uiOrder pages at uiArrayIndex */
+ eError = _AllocOSPage(psPageArrayData,
+ ui32GfpFlags,
+ ui32Order,
+ ui32MinOrder,
+ uiArrayIndex);
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+ /* Successful request. Move onto next. */
+ uiArrayIndex += ui32NumPageReq;
+ }
+ else
+ {
+ if (ui32Order > ui32MinOrder)
+ {
+ /* Last request failed. Let's ask for less next time */
+ ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
+ bIncreaseMaxOrder = IMG_FALSE;
+ ui32NumPageReq = (1 << ui32Order);
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ g_uiMaxOrder = ui32Order;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+ /* We should not trigger this code path in older kernels,
+ this is enforced by ensuring ui32Order == ui32MinOrder */
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+ }
+ else
+ {
+ /* Failed to alloc pages at required contiguity. Failed allocation */
+ PVR_DPF((PVR_DBG_ERROR, "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u (%s)",
+ __FUNCTION__,
+ uiArrayIndex,
+ uiPagesToAlloc,
+ ui32GfpFlags,
+ ui32Order,
+ PVRSRVGetErrorStringKM(eError)));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_free_pages;
+ }
+ }
+ }
+
+ if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
+ { /* All successful allocations on max order. Let's ask for more next time */
+ g_uiMaxOrder++;
+ }
+
+ /* Construct table of page pointers to apply attributes */
+ ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiIdx, uiIdy, uiIdz;
+
+ ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
+ if (ppsPageAttributeArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_pages;
+ }
+
+ for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
+ {
+ uiIdy = uiIdx >> ui32Order;
+ for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
+ {
+ ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
+ ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
+ }
+ }
+ }
+
+ /* Do the cache management as required */
+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+ ppsPageAttributeArray,
+ uiPagesToAlloc - uiPagesFromPool,
+ psPageArrayData->bZero,
+ psPageArrayData->ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+ goto e_free_pages;
+ }
+ else
+ {
+ if (psPageArrayData->bIsCMA)
+ {
+ OSFreeMem(ppsPageAttributeArray);
+ }
+ }
+
+ /* Update metadata */
+ psPageArrayData->iNumPagesAllocated = psPageArrayData->uiTotalNumPages;
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+ {
+ IMG_UINT32 ui32PageToFree;
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+
+ if (ppsPageAttributeArray)
+ {
+ OSFreeMem(ppsPageAttributeArray);
+ }
+
+ for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ ui32MinOrder,
+ psPageArrayData->dmavirtarray[ui32PageToFree],
+ psPageArrayData->dmaphysarray[ui32PageToFree],
+ ppsPageArray[ui32PageToFree]);
+ psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
+ ppsPageArray[ui32PageToFree] = INVALID_PAGE;
+ }
+ }
+ else
+ {
+ /* Free the pages we got from the pool */
+ for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+ {
+ _FreeOSPage(ui32MinOrder,
+ psPageArrayData->bUnsetMemoryType,
+ ppsPageArray[ui32PageToFree]);
+ ppsPageArray[ui32PageToFree] = INVALID_PAGE;
+ }
+
+ for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
+ {
+ _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
+ ppsPageArray[ui32PageToFree] = INVALID_PAGE;
+ }
+ }
+
+ return eError;
+ }
+}
+
+/* Allocation of OS pages: This function is used for sparse allocations.
+ *
+ * Sparse allocations provide only a proportion of sparse physical backing within the total
+ * virtual range. Currently we only support sparse allocations on device pages that are OS
+ * page sized.
+*/
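+/* Illustrative example (values are hypothetical): a sparse PMR created with
+ * uiTotalNumPages == 256 can be partially backed by calling this function
+ * with uiPagesToAlloc == 3 and puiAllocIndices == {0, 5, 9}; only those
+ * three page array slots receive physical pages, the rest stay INVALID_PAGE. */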
+static PVRSRV_ERROR
+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiPagesToAlloc)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ IMG_UINT32 uiOrder;
+ IMG_UINT32 uiPagesFromPool = 0;
+ unsigned int gfp_flags = _GetGFPFlags(psPageArrayData);
+
+ /* We use this page array to receive pages from the pool and then reuse it afterwards to
+ * store pages that need their cache attributes changed on x86 */
+ struct page **ppsTempPageArray;
+ IMG_UINT32 uiTempPageArrayIndex = 0;
+
+ /* Allocate the temporary page array that we need here to receive pages
+ * from the pool and to store pages that need their caching attributes changed */
+ ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiPagesToAlloc);
+ if (ppsTempPageArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __FUNCTION__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_exit;
+ }
+
+ uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
+
+ /* Check that the requested number of pages fits in the page array */
+ if (psPageArrayData->uiTotalNumPages <
+ (psPageArrayData->iNumPagesAllocated + uiPagesToAlloc))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to allocate more pages than this buffer can handle, "
+ "Request + Allocated exceeds Max! Request %u, Allocated %u, Max %u.",
+ __FUNCTION__,
+ uiPagesToAlloc,
+ psPageArrayData->iNumPagesAllocated,
+ psPageArrayData->uiTotalNumPages));
+ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ goto e_free_temp_array;
+ }
+
+ /* Try to get pages from the pool since it is faster */
+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+ psPageArrayData->ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ uiOrder,
+ psPageArrayData->bZero,
+ ppsTempPageArray,
+ &uiPagesFromPool);
+
+ /* Allocate pages from the OS or move the pages that we got from the pool
+ * to the page array */
+ DisableOOMKiller();
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ /* Check if the indices we are allocating are in range */
+ if (puiAllocIndices[i] >= psPageArrayData->uiTotalNumPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Alloc index at position %u has value %u, which exceeds the page array size %u.",
+ __FUNCTION__,
+ i,
+ puiAllocIndices[i],
+ psPageArrayData->uiTotalNumPages));
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e_free_pages;
+ }
+
+ /* Check if there is not already a page allocated at this position */
+ if (INVALID_PAGE != ppsPageArray[puiAllocIndices[i]])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping number %u at page array index %u already exists",
+ __func__,
+ i,
+ puiAllocIndices[i]));
+ eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ goto e_free_pages;
+ }
+
+ /* Finally assign a page to the array.
+ * Either from the pool or allocate a new one. */
+ if (uiPagesFromPool != 0)
+ {
+ uiPagesFromPool--;
+ ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
+ }
+ else
+ {
+ ppsPageArray[puiAllocIndices[i]] = alloc_pages(gfp_flags, uiOrder);
+ if(ppsPageArray[puiAllocIndices[i]] != NULL)
+ {
+ /* All pool pages have been consumed, so reuse the temp page array to collect freshly allocated pages for later cache-attribute maintenance */
+ ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
+ uiTempPageArrayIndex++;
+ }
+ else
+ {
+ /* Failed to alloc pages at required contiguity. Failed allocation */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
+ __FUNCTION__,
+ i,
+ uiPagesToAlloc,
+ gfp_flags,
+ uiOrder));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_free_pages;
+ }
+ }
+ }
+ EnableOOMKiller();
+
+ /* Do the cache management as required */
+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+ ppsTempPageArray,
+ uiTempPageArrayIndex,
+ psPageArrayData->bZero,
+ psPageArrayData->ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+ goto e_free_pages;
+ }
+
+ /* Update metadata */
+ psPageArrayData->iNumPagesAllocated += uiPagesToAlloc;
+
+ /* Free temporary page array */
+ OSFreeMem(ppsTempPageArray);
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+ {
+ IMG_UINT32 ui32PageToFree;
+
+ EnableOOMKiller();
+
+ /* Free the pages we got from the pool */
+ for(ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+ {
+ _FreeOSPage(0,
+ psPageArrayData->bUnsetMemoryType,
+ ppsTempPageArray[ui32PageToFree]);
+ }
+
+ /* Free the pages we just allocated from the OS */
+ for(ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
+ {
+ _FreeOSPage(0,
+ IMG_FALSE,
+ ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+
+ ppsPageArray[puiAllocIndices[ui32PageToFree]] = (struct page *) INVALID_PAGE;
+ }
+ }
+
+e_free_temp_array:
+ OSFreeMem(ppsTempPageArray);
+
+e_exit:
+ return eError;
+}
+
+/* Allocate pages for a given page array.
+ *
+ * The allocation path taken depends on whether an array of allocation
+ * indices has been passed in or not */
+static PVRSRV_ERROR
+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiPagesToAlloc)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ struct page **ppsPageArray;
+
+ /* Sanity checks */
+ PVR_ASSERT(NULL != psPageArrayData);
+ if (psPageArrayData->bIsCMA)
+ {
+ PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
+ PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
+ }
+ PVR_ASSERT(psPageArrayData->pagearray != NULL);
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+ ppsPageArray = psPageArrayData->pagearray;
+
+ /* Go the sparse alloc path if we have an array with alloc indices.*/
+ if (puiAllocIndices != NULL)
+ {
+ eError = _AllocOSPages_Sparse(psPageArrayData,
+ puiAllocIndices,
+ uiPagesToAlloc);
+ }
+ else
+ {
+ eError = _AllocOSPages_Fast(psPageArrayData);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e_exit;
+ }
+
+ if (psPageArrayData->bPoisonOnAlloc)
+ {
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
+ _PoisonPages(ppsPageArray[uiIdx],
+ 0,
+ _AllocPoison,
+ _AllocPoisonSize);
+ }
+ }
+
+ _DumpPageArray(ppsPageArray, psPageArrayData->uiTotalNumPages);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ IMG_CPU_PHYADDR sCPUPhysAddr;
+ IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
+
+ sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiIdx]);
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+ NULL,
+ sCPUPhysAddr,
+ 1 << psPageArrayData->uiLog2DevPageSize,
+ NULL);
+ }
+ }
+#else
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, uiPagesToAlloc * PAGE_SIZE);
+#endif
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+ return PVRSRV_OK;
+
+e_exit:
+ return eError;
+}
+
+/* Same as _FreeOSPage except free memory using DMA framework */
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+ size_t alloc_size,
+ IMG_UINT32 uiOrder,
+ void *virt_addr,
+ dma_addr_t dev_addr,
+ struct page *psPage)
+{
+ if (DMA_IS_ALLOCPG_ADDR(dev_addr))
+ {
+#if defined(CONFIG_X86)
+ void *pvPageVAddr = page_address(psPage);
+ if (pvPageVAddr)
+ {
+ int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to reset page attribute",
+ __FUNCTION__));
+ }
+ }
+#endif
+ __free_pages(psPage, uiOrder);
+ }
+ else
+ {
+ dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
+ }
+}
+
+/* Free a single page back to the OS.
+ * Make sure the cache type is set back to the default value.
+ *
+ * Note:
+ * We must _only_ check bUnsetMemoryType in the case where we need to free
+ * the page back to the OS since we may have to revert the cache properties
+ * of the page to the default as given by the OS when it was allocated. */
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+ IMG_BOOL bUnsetMemoryType,
+ struct page *psPage)
+{
+
+#if defined(CONFIG_X86)
+ void *pvPageVAddr;
+ pvPageVAddr = page_address(psPage);
+
+ if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
+ {
+ int ret;
+
+ ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+ __free_pages(psPage, uiOrder);
+}
+
+/* Free the struct holding the metadata */
+static PVRSRV_ERROR
+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+ /* Check if the page array actually still exists.
+ * It might be the case that it has been moved to the page pool */
+ if (psPageArrayData->pagearray != NULL)
+ {
+ OSFreeMemNoStats(psPageArrayData->pagearray);
+ }
+
+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* _FreeOSPages_MemStats: Depends on the bridge lock already being held */
+static void
+_FreeOSPages_MemStats(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32NumPages)
+{
+ struct page **ppsPageArray;
+ #if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ IMG_UINT32 ui32PageIndex;
+ #endif
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: psPageArrayData %p, ui32NumPages %u", __FUNCTION__, psPageArrayData, ui32NumPages));
+ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+ ppsPageArray = psPageArrayData->pagearray;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, ui32NumPages * PAGE_SIZE);
+#else
+ for(ui32PageIndex = 0; ui32PageIndex < ui32NumPages; ui32PageIndex++)
+ {
+ IMG_CPU_PHYADDR sCPUPhysAddr;
+ IMG_UINT32 uiArrayIndex = (pai32FreeIndices) ? pai32FreeIndices[ui32PageIndex] : ui32PageIndex;
+
+ sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiArrayIndex]);
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, sCPUPhysAddr.uiAddr);
+ }
+#endif
+#endif
+}
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+/* Free all or some pages from a sparse page array */
+static PVRSRV_ERROR
+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount)
+{
+ IMG_BOOL bSuccess;
+ IMG_UINT32 uiOrder;
+ IMG_UINT32 uiPageIndex, i = 0, uiTempIdx;
+ struct page **ppsPageArray;
+ IMG_UINT32 uiNumPages;
+
+ struct page **ppsTempPageArray;
+ IMG_UINT32 uiTempArraySize;
+
+ /* We really should have something to free before we call this */
+ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+ if(pai32FreeIndices == NULL)
+ {
+ uiNumPages = psPageArrayData->uiTotalNumPages;
+ uiTempArraySize = psPageArrayData->iNumPagesAllocated;
+ }
+ else
+ {
+ uiNumPages = ui32FreePageCount;
+ uiTempArraySize = ui32FreePageCount;
+ }
+
+ /* OSAllocMemNoStats required because this code may be run without the bridge lock held */
+ ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
+ if (ppsTempPageArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __FUNCTION__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ ppsPageArray = psPageArrayData->pagearray;
+ uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
+
+ /* Poison if necessary */
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ for (i = 0; i < uiNumPages; i ++)
+ {
+ uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i ;
+ if(INVALID_PAGE != ppsPageArray[uiPageIndex])
+ {
+ _PoisonPages(ppsPageArray[uiPageIndex],
+ 0,
+ _FreePoison,
+ _FreePoisonSize);
+ }
+ }
+ }
+
+ /* Put pages in a contiguous array so further processing is easier */
+ uiTempIdx = 0;
+ for (i = 0; i < uiNumPages; i++)
+ {
+ uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
+ if(INVALID_PAGE != ppsPageArray[uiPageIndex])
+ {
+ ppsTempPageArray[uiTempIdx] = ppsPageArray[uiPageIndex];
+ uiTempIdx++;
+ ppsPageArray[uiPageIndex] = (struct page *) INVALID_PAGE;
+ }
+ }
+
+ /* Try to move the temp page array to the pool */
+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+ ppsTempPageArray,
+ psPageArrayData->bUnpinned,
+ uiOrder,
+ uiTempIdx);
+ if (bSuccess)
+ {
+ goto exit_ok;
+ }
+
+ /* Free pages and reset page caching attributes on x86 */
+#if defined(CONFIG_X86)
+ if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+ {
+ int iError;
+ iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
+
+ if (iError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ }
+ }
+#endif
+
+ /* Free the pages */
+ for (i = 0; i < uiTempIdx; i++)
+ {
+ __free_pages(ppsTempPageArray[i], uiOrder);
+ }
+
+ /* Free the temp page array here if it did not move to the pool */
+ OSFreeMemNoStats(ppsTempPageArray);
+
+exit_ok:
+ /* Update metadata */
+ psPageArrayData->iNumPagesAllocated -= uiTempIdx;
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+ return PVRSRV_OK;
+}
+
+/* Free all the pages in a page array */
+static PVRSRV_ERROR
+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ IMG_BOOL bSuccess;
+ IMG_UINT32 uiOrder;
+ IMG_UINT32 i = 0;
+ IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumPages;
+
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
+
+ /* We really should have something to free before we call this */
+ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+ /* Poison pages if necessary */
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ for (i = 0; i < uiNumPages; i++)
+ {
+ _PoisonPages(ppsPageArray[i],
+ 0,
+ _FreePoison,
+ _FreePoisonSize);
+ }
+ }
+
+ /* Try to move the page array to the pool */
+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+ ppsPageArray,
+ psPageArrayData->bUnpinned,
+ uiOrder,
+ uiNumPages);
+ if (bSuccess)
+ {
+ psPageArrayData->pagearray = NULL;
+ goto exit_ok;
+ }
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+ for (i = 0; i < uiDevNumPages; i++)
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ uiOrder,
+ psPageArrayData->dmavirtarray[i],
+ psPageArrayData->dmaphysarray[i],
+ ppsPageArray[i]);
+ psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[i] = NULL;
+ ppsPageArray[i] = INVALID_PAGE;
+ }
+ }
+ else
+ {
+#if defined(CONFIG_X86)
+ if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+ {
+ int ret;
+
+ ret = set_pages_array_wb(ppsPageArray, uiNumPages);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ }
+ }
+#endif
+
+ for (i = 0; i < uiNumPages; i++)
+ {
+ _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
+ ppsPageArray[i] = INVALID_PAGE;
+ }
+ }
+
+exit_ok:
+ /* Update metadata */
+ psPageArrayData->iNumPagesAllocated = 0;
+ return PVRSRV_OK;
+}
+
+/* Free pages from a page array.
+ * Takes care of mem stats and chooses correct free path depending on parameters. */
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiNumPages;
+
+ /* Check how many pages we have to free */
+ if(pai32FreeIndices == NULL)
+ {
+ uiNumPages = psPageArrayData->iNumPagesAllocated;
+ }
+ else
+ {
+ uiNumPages = ui32FreePageCount;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ _FreeOSPages_MemStats(psPageArrayData, pai32FreeIndices, uiNumPages);
+#endif
+
+ /* Go the sparse or non-sparse path */
+ if (psPageArrayData->iNumPagesAllocated != psPageArrayData->uiTotalNumPages
+ || pai32FreeIndices != NULL)
+ {
+ eError = _FreeOSPages_Sparse(psPageArrayData,
+ pai32FreeIndices,
+ uiNumPages);
+ }
+ else
+ {
+ eError = _FreeOSPages_Fast(psPageArrayData);
+ }
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+ }
+
+ _DumpPageArray(psPageArrayData->pagearray, psPageArrayData->uiTotalNumPages);
+
+ return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* The destructor function is called after the last reference disappears,
+ but before the PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+
+ /* We can't free pages until now. */
+ if (psOSPageArrayData->iNumPagesAllocated != 0)
+ {
+ _PagePoolLock();
+ if (psOSPageArrayData->bUnpinned == IMG_TRUE)
+ {
+ _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+ }
+ _PagePoolUnlock();
+
+ eError = _FreeOSPages(psOSPageArrayData,
+ NULL,
+ 0);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ }
+
+ eError = _FreeOSPagesArray(psOSPageArrayData);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ This function must be called before the address lookup function. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+ if (psOSPageArrayData->bOnDemand)
+ {
+ /* Allocate Memory for deferred allocation */
+ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ eError = PVRSRV_OK;
+ return eError;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ /* Just drops the refcount. */
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+ if (psOSPageArrayData->bOnDemand)
+ {
+ /* Free Memory for deferred allocation */
+ eError = _FreeOSPages(psOSPageArrayData,
+ NULL,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ PVR_ASSERT (eError == PVRSRV_OK);
+ return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+ IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2DevPageSize;
+ IMG_UINT32 uiInPageOffset;
+ IMG_UINT32 uiPageIndex;
+ IMG_UINT32 uiIdx;
+
+ if (psOSPageArrayData->uiLog2DevPageSize < ui32Log2PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested physical addresses from PMR "
+ "for incompatible contiguity %u!",
+ __FUNCTION__,
+ ui32Log2PageSize));
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
+ {
+ if (pbValid[uiIdx])
+ {
+ uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2DevPageSize;
+ uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2DevPageSize);
+
+ PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumPages);
+ PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+ psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
+ psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
+ }
+ }
+
+ return PVRSRV_OK;
+}
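+
+/* Worked example for the address calculation above (4KB device pages, i.e.
+ * uiLog2DevPageSize == 12): an offset of 0x1204 gives uiPageIndex == 1 and
+ * uiInPageOffset == 0x204, so the returned address is
+ * page_to_phys(pagearray[1]) + 0x204. */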
+
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+ void *pvBase;
+ IMG_UINT32 ui32PageCount;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+ void *pvAddress;
+ pgprot_t prot = PAGE_KERNEL;
+ IMG_UINT32 ui32PageOffset;
+ size_t uiMapOffset;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 uiLog2DevPageSize = psOSPageArrayData->uiLog2DevPageSize;
+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+	/*
+	   A zero offset and size has a special meaning: map the whole PMR.
+	   This is because the callers of this callback might not be able to
+	   determine the physical size.
+	*/
+ if ((uiOffset == 0) && (uiSize == 0))
+ {
+ ui32PageOffset = 0;
+ uiMapOffset = 0;
+ ui32PageCount = psOSPageArrayData->iNumPagesAllocated;
+ }
+ else
+ {
+ size_t uiEndoffset;
+
+ ui32PageOffset = uiOffset >> uiLog2DevPageSize;
+ uiMapOffset = uiOffset - (ui32PageOffset << uiLog2DevPageSize);
+ uiEndoffset = uiOffset + uiSize - 1;
+		/* Add one as we want the count, not the offset */
+ ui32PageCount = (uiEndoffset >> uiLog2DevPageSize) + 1;
+ ui32PageCount -= ui32PageOffset;
+ }
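+
+	/* Editorial note (illustrative only, not part of the driver): with
+	 * uiLog2DevPageSize = 12 (4 KiB pages), uiOffset = 0x1800 and
+	 * uiSize = 0x2000, the calculation above gives:
+	 *   ui32PageOffset = 0x1800 >> 12              = 1
+	 *   uiMapOffset    = 0x1800 - (1 << 12)        = 0x800
+	 *   uiEndoffset    = 0x1800 + 0x2000 - 1       = 0x37FF
+	 *   ui32PageCount  = ((0x37FF >> 12) + 1) - 1  = 3
+	 * i.e. pages 1..3 are mapped and the returned pointer starts 0x800
+	 * bytes into the first mapped page. */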
+
+ if (psOSPageArrayData->bIsCMA)
+ {
+ prot = pgprot_noncached(prot);
+ }
+ else
+ {
+ switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ prot = pgprot_noncached(prot);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ prot = pgprot_writecombine(prot);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ break;
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+
+ psData = OSAllocMem(sizeof(*psData));
+ if (psData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
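+	/* Descriptive note: vm_map_ram() is used on 64-bit builds as it is
+	 * typically faster than vmap() (hence the name of the override macro
+	 * tested below), while 32-bit builds, or builds that define
+	 * PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS, fall back to vmap(). */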
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ pvAddress = vmap(&psOSPageArrayData->pagearray[ui32PageOffset],
+ ui32PageCount,
+ VM_READ | VM_WRITE,
+ prot);
+#else
+ pvAddress = vm_map_ram(&psOSPageArrayData->pagearray[ui32PageOffset],
+ ui32PageCount,
+ -1,
+ prot);
+#endif
+ if (pvAddress == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ *ppvKernelAddressOut = pvAddress + uiMapOffset;
+ psData->pvBase = pvAddress;
+ psData->ui32PageCount = ui32PageCount;
+ *phHandleOut = psData;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+ e1:
+ OSFreeMem(psData);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ vunmap(psData->pvBase);
+#else
+ vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
+#endif
+ OSFreeMem(psData);
+}
+
+static
+PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
+{
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Lock down the pool and add the array to the unpin list */
+ _PagePoolLock();
+
+ /* Sanity check */
+ PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
+ PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
+
+ eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not able to add allocation to unpinned list (%d).",
+ __FUNCTION__,
+ eError));
+
+ goto e_exit;
+ }
+
+ psOSPageArrayData->bUnpinned = IMG_TRUE;
+
+e_exit:
+ _PagePoolUnlock();
+ return eError;
+}
+
+static
+PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
+ PMR_MAPPING_TABLE *psMappingTable)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+ IMG_UINT32 *pui32MapTable = NULL;
+	IMG_UINT32 i, j = 0, ui32Temp = 0;
+
+ _PagePoolLock();
+
+ /* Sanity check */
+ PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_TRUE);
+
+ psOSPageArrayData->bUnpinned = IMG_FALSE;
+
+	/* If there are still pages in the array, just remove the entry from the unpin list */
+ if (psOSPageArrayData->iNumPagesAllocated != 0)
+ {
+ _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+ _PagePoolUnlock();
+
+ eError = PVRSRV_OK;
+ goto e_exit_mapalloc_failure;
+ }
+ _PagePoolUnlock();
+
+ /* If pages were reclaimed we allocate new ones and
+ * return PVRSRV_ERROR_PMR_NEW_MEMORY */
+ if (psMappingTable->ui32NumVirtChunks == 1)
+ {
+ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumPages);
+ }
+ else
+ {
+ pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
+		if (NULL == pui32MapTable)
+ {
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Unable to allocate the map table.",
+ __FUNCTION__));
+ goto e_exit_mapalloc_failure;
+ }
+
+ for (i = 0,j=0; i < psMappingTable->ui32NumVirtChunks; i++)
+ {
+ ui32Temp = psMappingTable->aui32Translation[i];
+ if (TRANSLATION_INVALID != ui32Temp)
+ {
+ pui32MapTable[j++] = ui32Temp;
+ }
+ }
+ eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not able to get new pages for unpinned allocation.",
+ __FUNCTION__));
+
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_exit;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Allocating new pages for unpinned allocation. "
+ "Old content is lost!",
+ __FUNCTION__));
+
+ eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
+
+e_exit:
+ OSFreeMem(pui32MapTable);
+e_exit_mapalloc_failure:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemOSMem
+@Description    Changes the sparse mapping by allocating and freeing pages.
+                It also updates the GPU and CPU mappings accordingly.
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+ struct page **psPageArray = psPMRPageArrayData->pagearray;
+ struct page *psPage;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0; /*!< Number of pages to alloc from the OS */
+	IMG_UINT32 ui32AdtnlFreePages = 0; /*!< Number of pages to free back to the OS */
+	IMG_UINT32 ui32CommonRequestCount = 0; /*!< Number of pages to move position in the page array */
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32Index = 0;
+	IMG_UINT32 uiAllocpgidx;
+ IMG_UINT32 uiFreepgidx;
+ IMG_UINT32 ui32Order = psPMRPageArrayData->uiLog2DevPageSize - PAGE_SHIFT;
+
+ /* Check SPARSE flags and calculate pages to allocate and free */
+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+ {
+ ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+ ui32FreePageCount : ui32AllocPageCount;
+
+ PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+ }
+
+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+ {
+ ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32AllocPageCount = 0;
+ }
+
+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+ {
+ ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32FreePageCount = 0;
+ }
+
+ if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+
+ /* The incoming request is classified into two operations independent of
+ * each other: alloc & free pages.
+ * These operations can be combined with two mapping operations as well
+ * which are GPU & CPU space mappings.
+ *
+ * From the alloc and free page requests, the net amount of pages to be
+ * allocated or freed is computed. Pages that were requested to be freed
+ * will be reused to fulfil alloc requests.
+ *
+ * The order of operations is:
+ * 1. Allocate new pages from the OS
+ * 2. Move the free pages from free request to alloc positions.
+ * 3. Free the rest of the pages not used for alloc
+ *
+ * Alloc parameters are validated at the time of allocation
+ * and any error will be handled then. */
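+
+	/* Editorial note (illustrative only, not part of the driver): assuming
+	 * both SPARSE_RESIZE_ALLOC and SPARSE_RESIZE_FREE are set (the swap case;
+	 * note the PDUMP_PANIC above for pdump captures) with
+	 * ui32AllocPageCount = 4 and ui32FreePageCount = 6, the flag handling
+	 * above computes:
+	 *   ui32CommonRequestCount = min(4, 6) = 4
+	 *   ui32AdtnlAllocPages    = 4 - 4     = 0   (no new OS pages needed)
+	 *   ui32AdtnlFreePages     = 6 - 4     = 2   (returned to the OS)
+	 * so the loops below move 4 of the pages being freed into the alloc
+	 * positions and actually release only 2 pages. */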
+
+ /* Validate the free indices */
+ if (ui32FreePageCount)
+ {
+		if (NULL != pai32FreeIndices)
+		{
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+				if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (INVALID_PAGE == psPageArray[uiFreepgidx])
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+ }
+
+ /* Validate the alloc indices */
+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Loop];
+
+		if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ if ((INVALID_PAGE != psPageArray[uiAllocpgidx]) ||
+ (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ else
+ {
+ if ((INVALID_PAGE == psPageArray[uiAllocpgidx]) ||
+ (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ }
+
+ ui32Loop = 0;
+
+ /* Allocate new pages from the OS */
+ if (0 != ui32AdtnlAllocPages)
+ {
+ eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+					"%s: Additional page allocation failed",
+ __FUNCTION__));
+ goto e0;
+ }
+
+		/* Mark the corresponding entries of the translation table as valid */
+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+ {
+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+ }
+ }
+
+
+ ui32Index = ui32Loop;
+
+ /* Move the corresponding free pages to alloc request */
+ for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Index];
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+ psPage = psPageArray[uiAllocpgidx];
+ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+		/* Is remap mem used in a real world scenario? Should it be turned into
+		 * a debug feature? The condition check should be moved out of the loop;
+		 * this will be done at a later point after some analysis. */
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ psPageArray[uiFreepgidx] = (struct page *)INVALID_PAGE;
+ }
+ else
+ {
+ psPageArray[uiFreepgidx] = psPage;
+ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ }
+
+ /* Be sure to honour the attributes associated with the allocation
+ * such as zeroing, poisoning etc. */
+ if (psPMRPageArrayData->bPoisonOnAlloc)
+ {
+ _PoisonPages(psPageArray[uiAllocpgidx],
+ ui32Order,
+ _AllocPoison,
+ _AllocPoisonSize);
+ }
+ else
+ {
+ if (psPMRPageArrayData->bZero)
+ {
+ char a = 0;
+ _PoisonPages(psPageArray[uiAllocpgidx],
+ ui32Order,
+ &a,
+ 1);
+ }
+ }
+ }
+
+ /* Free the additional free pages */
+ if (0 != ui32AdtnlFreePages)
+ {
+ eError = _FreeOSPages(psPMRPageArrayData,
+ &pai32FreeIndices[ui32Loop],
+ ui32AdtnlFreePages);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ while (ui32Loop < ui32FreePageCount)
+ {
+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
+ ui32Loop++;
+ }
+ }
+
+ eError = PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemCPUMapOSMem
+@Description    Changes the CPU mappings of a sparse allocation according to
+                the requested alloc and free page indices.
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ struct page **psPageArray;
+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+ IMG_CPU_PHYADDR sCPUPAddr;
+
+ sCPUPAddr.uiAddr = 0;
+ psPageArray = psPMRPageArrayData->pagearray;
+
+ return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+ sCpuVAddrBase,
+ sCPUPAddr,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ IMG_FALSE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+ .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+ .pfnReadBytes = NULL,
+ .pfnWriteBytes = NULL,
+ .pfnUnpinMem = &PMRUnpinOSMem,
+ .pfnPinMem = &PMRPinOSMem,
+ .pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
+ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
+ .pfnFinalize = &PMRFinalizeOSMem,
+};
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ PMR *psPMR;
+ struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+ PMR_FLAGS_T uiPMRFlags;
+ PHYS_HEAP *psPhysHeap;
+ IMG_BOOL bZero;
+ IMG_BOOL bIsCMA;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bCpuLocal;
+ IMG_BOOL bFwLocal;
+ IMG_UINT32 ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, uiFlags);
+ if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
+ {
+ ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+ }
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * The host driver (but not guest) can still use this factory for firmware
+ * allocations
+ */
+ PVR_ASSERT(!PVRSRV_CHECK_FW_LOCAL(uiFlags));
+#endif
+
+ /*
+	 * Silently round up the alignment/page size if the request was less than
+	 * PAGE_SHIFT, because it is never harmful for memory to be _more_
+	 * contiguous than was desired.
+ */
+ uiLog2PageSize = PAGE_SHIFT > uiLog2PageSize ? PAGE_SHIFT : uiLog2PageSize;
+
+	/* For a non-sparse allocation, tolerate bad requests and round up.
+	 * For sparse allocations the users have to make sure they meet the
+	 * right requirements themselves. */
+ if (ui32NumPhysChunks == ui32NumVirtChunks &&
+ ui32NumVirtChunks == 1)
+ {
+ /* Round up allocation size to at least a full PAGE_SIZE */
+ uiSize = PVR_ALIGN(uiSize, PAGE_SIZE);
+ uiChunkSize = uiSize;
+ }
+
+ /*
+	 * Use the CMA framework if the requested page size is greater than the
+	 * OS page size; note that OSMMapPMRGeneric() has the same expectation.
+ */
+ bIsCMA = uiLog2PageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
+ bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+ if (bZero && bPoisonOnAlloc)
+ {
+ /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errorOnParam;
+ }
+
+	/* Create the array structure that holds the physical pages */
+ eError = _AllocOSPageArray(psDevNode,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ uiLog2PageSize,
+ bZero,
+ bIsCMA,
+ bPoisonOnAlloc,
+ bPoisonOnFree,
+ bOnDemand,
+ ui32CPUCacheFlags,
+ &psPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPageArray;
+ }
+
+ if (!bOnDemand)
+ {
+ /* Do we fill the whole page array or just parts (sparse)? */
+ if (ui32NumPhysChunks == ui32NumVirtChunks)
+ {
+ /* Allocate the physical pages */
+ eError = _AllocOSPages(psPrivData, NULL, psPrivData->uiTotalNumPages);
+ }
+ else
+ {
+ if (ui32NumPhysChunks != 0)
+ {
+ /* Calculate the number of pages we want to allocate */
+ IMG_UINT32 uiPagesToAlloc =
+ (IMG_UINT32) ((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2PageSize) + 1);
+
+ /* Make sure calculation is correct */
+ PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2PageSize) ==
+ (ui32NumPhysChunks * uiChunkSize) );
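+
+				/* Editorial note (illustrative only, not part of the driver):
+				 * e.g. ui32NumPhysChunks = 3, uiChunkSize = 8192 (8 KiB) and
+				 * uiLog2PageSize = 12 give
+				 *   uiPagesToAlloc = ((3 * 8192 - 1) >> 12) + 1 = 6
+				 * which, shifted back up (6 << 12 = 24576), matches the
+				 * 24 KiB of physical backing requested, as asserted above. */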
+
+ /* Allocate the physical pages */
+ eError = _AllocOSPages(psPrivData, puiAllocIndices,
+ uiPagesToAlloc);
+ }
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPages;
+ }
+ }
+
+ /*
+ * In this instance, we simply pass flags straight through.
+ *
+ * Generically, uiFlags can include things that control the PMR factory, but
+ * we don't need any such thing (at the time of writing!), and our caller
+ * specifies all PMR flags so we don't need to meddle with what was given to
+ * us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /*
+ * Check no significant bits were lost in cast due to different bit widths
+ * for flags
+ */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ if (bOnDemand)
+ {
+ PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
+ }
+
+ if (bFwLocal)
+ {
+ PDUMPCOMMENT("FW_LOCAL allocation requested");
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else if (bCpuLocal)
+ {
+ PDUMPCOMMENT("CPU_LOCAL allocation requested");
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ eError = PMRCreatePMR(psDevNode,
+ psPhysHeap,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ puiAllocIndices,
+ uiLog2PageSize,
+ uiPMRFlags,
+ pszAnnotation,
+ &_sPMROSPFuncTab,
+ psPrivData,
+ PMR_TYPE_OSMEM,
+ &psPMR,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreate;
+ }
+
+ *ppsPMRPtr = psPMR;
+
+ return PVRSRV_OK;
+
+errorOnCreate:
+ if (!bOnDemand)
+ {
+ eError2 = _FreeOSPages(psPrivData, NULL, 0);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+errorOnAllocPages:
+ eError2 = _FreeOSPagesArray(psPrivData);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linux OS physmem implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PHYSMEM_OSMEM_LINUX_H__
+#define __PHYSMEM_OSMEM_LINUX_H__
+
+void LinuxInitPhysmem(void);
+void LinuxDeinitPhysmem(void);
+
+#endif /* __PHYSMEM_OSMEM_LINUX_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Implementation of PMR functions for Trusted Device secure memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for physical memory imported
+ from a trusted environment. The driver cannot acquire CPU
+ mappings for this secure memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "physmem_tdsecbuf.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+
+#if defined (SUPPORT_TRUSTED_DEVICE)
+
+#if !defined(NO_HARDWARE)
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PHYS_HEAP *psTDSecBufPhysHeap;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT64 ui64SecBufHandle;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ IMG_UINT32 i;
+
+ if (psPrivData->ui32Log2PageSize != ui32Log2PageSize)
+ {
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (i = 0; i < ui32NumOfPages; i++)
+ {
+ psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i];
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psPrivData->psDevNode->psDevConfig;
+ PVRSRV_ERROR eError;
+
+ eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+ psPrivData->ui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree cannot free the resource!"));
+ }
+ return eError;
+ }
+
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+ .pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+ .pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+ RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+ PMR_TDSECBUF_DATA *psPrivData = NULL;
+ PMR *psPMR = NULL;
+ IMG_UINT32 uiMappingTable = 0;
+ PMR_FLAGS_T uiPMRFlags;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+ /* In this instance, we simply pass flags straight through.
+ * Generically, uiFlags can include things that control the PMR
+ * factory, but we don't need any such thing (at the time of
+ * writing!), and our caller specifies all PMR flags so we don't
+ * need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /* Check no significant bits were lost in cast due to different bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ /* Many flags can be dropped as the driver cannot access this memory
+ * and it is assumed that the trusted zone is physically contiguous
+ */
+ uiPMRFlags &= ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE |
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK);
+
+ psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocData;
+ }
+
+ /* Get required info for the TD Secure Buffer physical heap */
+ if (!psRGXData->bHasTDSecureBufPhysHeap)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnAcquireHeap;
+ }
+
+ eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+ &psPrivData->psTDSecBufPhysHeap);
+ if (eError != PVRSRV_OK) goto errorOnAcquireHeap;
+
+ psPrivData->ui64Size = uiSize;
+
+ if (psDevConfig->pfnTDSecureBufAlloc && psDevConfig->pfnTDSecureBufFree)
+ {
+ PVRSRV_TD_SECBUF_PARAMS sTDSecBufParams;
+
+ psPrivData->psDevNode = psDevNode;
+
+ /* Ask the Trusted Device to allocate secure memory */
+ sTDSecBufParams.uiSize = uiSize;
+ sTDSecBufParams.uiAlign = 1 << uiLog2Align;
+
+ /* These will be returned by pfnTDSecureBufAlloc on success */
+ sTDSecBufParams.psSecBufAddr = &psPrivData->sCpuPAddr;
+ sTDSecBufParams.pui64SecBufHandle = &psPrivData->ui64SecBufHandle;
+
+ eError = psDevConfig->pfnTDSecureBufAlloc(psDevConfig->hSysData,
+ &sTDSecBufParams);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc cannot allocate the resource!"));
+ }
+ goto errorOnAlloc;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc/Free not implemented!"));
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+ goto errorOnAlloc;
+ }
+
+ PhysHeapCpuPAddrToDevPAddr(psPrivData->psTDSecBufPhysHeap,
+ 1,
+ &psPrivData->sDevPAddr,
+ &psPrivData->sCpuPAddr);
+
+	/* Check that the secure buffer has the requested alignment
+	 * (i.e. that it is aligned to a Rogue cache line) */
+	if ((((1ULL << uiLog2Align) - 1) & psPrivData->sCpuPAddr.uiAddr) != 0)
+	{
+ PVR_DPF((PVR_DBG_ERROR,
+		         "Trusted Device physical heap has the wrong alignment! "
+ "Physical address 0x%llx, alignment mask 0x%llx",
+ (unsigned long long) psPrivData->sCpuPAddr.uiAddr,
+ ((1ULL << uiLog2Align) - 1)));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnCheckAlign;
+ }
+
+ psPrivData->ui32Log2PageSize = uiLog2Align;
+
+ eError = PMRCreatePMR(psDevNode,
+ psPrivData->psTDSecBufPhysHeap,
+ psPrivData->ui64Size,
+ psPrivData->ui64Size,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable, /* pui32MappingTable (not used) */
+ uiLog2Align,
+ uiPMRFlags,
+ "TDSECUREBUF_PMR",
+ &_sPMRTDSecBufFuncTab,
+ psPrivData,
+ PMR_TYPE_TDSECBUF,
+ &psPMR,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreatePMR;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ eError = RIWritePMREntryKM(psPMR,
+ sizeof("TDSecureBuffer"),
+ "TDSecureBuffer",
+ psPrivData->ui64Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed to write PMR entry (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+#endif
+
+ *ppsPMRPtr = psPMR;
+ *pui64SecBufHandle = psPrivData->ui64SecBufHandle;
+
+ return PVRSRV_OK;
+
+
+errorOnCreatePMR:
+errorOnCheckAlign:
+ eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+ psPrivData->ui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree cannot free the resource!"));
+ }
+ }
+errorOnAlloc:
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+errorOnAcquireHeap:
+ OSFreeMem(psPrivData);
+
+errorOnAllocData:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#else /* NO_HARDWARE */
+
+#include "physmem_osmem.h"
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+ PHYS_HEAP *psTDSecBufPhysHeap;
+ PMR *psOSMemPMR;
+ IMG_UINT32 ui32Log2PageSize;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR
+PMRLockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ return PMRLockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRUnlockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ return PMRUnlockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ /* On the assumption that this PMR was created with
+ * NumPhysChunks == NumVirtChunks then
+ * puiOffset[0] == uiLogicalOffset
+ */
+
+ return PMR_DevPhysAddr(psPrivData->psOSMemPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ puiOffset[0],
+ psDevPAddr,
+ pbValid);
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ size_t uiLengthOut;
+
+ PVR_UNREFERENCED_PARAMETER(ulFlags);
+
+ return PMRAcquireKernelMappingData(psPrivData->psOSMemPMR,
+ uiOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ &uiLengthOut,
+ phHandleOut);
+}
+
+static void
+PMRReleaseKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ PMRReleaseKernelMappingData(psPrivData->psOSMemPMR, hHandle);
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ PMRUnrefPMR(psPrivData->psOSMemPMR);
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+ .pfnLockPhysAddresses = &PMRLockPhysAddressesTDSecBufMem,
+ .pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesTDSecBufMem,
+ .pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataTDSecBufMem,
+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataTDSecBufMem,
+ .pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+ RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+ PMR_TDSECBUF_DATA *psPrivData = NULL;
+ PMR *psPMR = NULL;
+ PMR *psOSPMR = NULL;
+ IMG_UINT32 uiMappingTable = 0;
+ PMR_FLAGS_T uiPMRFlags;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* In this instance, we simply pass flags straight through.
+ * Generically, uiFlags can include things that control the PMR
+ * factory, but we don't need any such thing (at the time of
+ * writing!), and our caller specifies all PMR flags so we don't
+ * need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /* Check no significant bits were lost in cast due to different bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocData;
+ }
+
+ /* Get required info for the TD Secure Buffer physical heap */
+ if (!psRGXData->bHasTDSecureBufPhysHeap)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnAcquireHeap;
+ }
+
+ eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+ &psPrivData->psTDSecBufPhysHeap);
+ if (eError != PVRSRV_OK) goto errorOnAcquireHeap;
+
+ psPrivData->ui32Log2PageSize = uiLog2Align;
+
+ /* Note that this PMR is only used to copy the FW blob to memory and
+	 * to dump this memory to pdump; it doesn't need to have the alignment
+	 * requested by the caller.
+ */
+ eError = PhysmemNewOSRamBackedPMR(psDevNode,
+ uiSize,
+ uiSize,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable,
+ psPrivData->ui32Log2PageSize,
+ uiFlags,
+ "TDSECUREBUF_OSMEM",
+ &psOSPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreateOSPMR;
+ }
+
+ /* This is the primary PMR dumped with correct memspace and alignment */
+ eError = PMRCreatePMR(psDevNode,
+ psPrivData->psTDSecBufPhysHeap,
+ uiSize,
+ uiSize,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable, /* pui32MappingTable (not used) */
+ uiLog2Align,
+ uiPMRFlags,
+ "TDSECUREBUF_PMR",
+ &_sPMRTDSecBufFuncTab,
+ psPrivData,
+ PMR_TYPE_TDSECBUF,
+ &psPMR,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreateTDPMR;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ eError = RIWritePMREntryKM(psPMR,
+ sizeof("TDSecureBuffer"),
+ "TDSecureBuffer",
+	                           uiSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed to write PMR entry (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+#endif
+
+ psPrivData->psOSMemPMR = psOSPMR;
+ *ppsPMRPtr = psPMR;
+ *pui64SecBufHandle = 0x0ULL;
+
+ return PVRSRV_OK;
+
+errorOnCreateTDPMR:
+ PMRUnrefPMR(psOSPMR);
+
+errorOnCreateOSPMR:
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+
+errorOnAcquireHeap:
+ OSFreeMem(psPrivData);
+
+errorOnAllocData:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#endif /* NO_HARDWARE */
+
+#else /* SUPPORT_TRUSTED_DEVICE */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Align);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(pui64SecBufHandle);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#endif
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ return PhysmemNewTDSecureBufPMR(psConnection,
+ psDevNode,
+ uiSize,
+ (PMR_LOG2ALIGN_T)ui32Log2Align,
+ uiFlags,
+ ppsPMRPtr,
+ pui64SecBufHandle);
+}
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Header for secure buffer PMR factory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks importing secure buffer
+ allocations.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PHYSMEM_TDSECBUF_H_
+#define _PHYSMEM_TDSECBUF_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "pmr.h"
+
+/*
+ * PhysmemNewTDSecureBufPMR
+ *
+ * This function is used as part of the facility to provide secure buffer
+ * memory. A default implementation is provided but it can be replaced by
+ * the SoC implementor if necessary.
+ *
+ * Calling this function will create a PMR for a memory allocation made
+ * in "secure buffer memory". It will only be writable by a trusted
+ * entity and, when the feature is enabled on the SoC, the GPU will only
+ * be able to perform operations permitted by the security rules.
+ */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle);
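+
+/*
+ * Illustrative sketch (editorial addition, not part of the driver): assuming
+ * a server-side caller has already obtained psConnection, psDevNode and a
+ * suitable set of uiFlags elsewhere, a 1 MiB secure buffer with 4 KiB (2^12)
+ * alignment would be requested roughly as follows:
+ *
+ *     PMR          *psPMR;
+ *     IMG_UINT64    ui64SecBufHandle;
+ *     PVRSRV_ERROR  eError;
+ *
+ *     eError = PhysmemNewTDSecureBufPMR(psConnection, psDevNode,
+ *                                       0x100000, 12, uiFlags,
+ *                                       &psPMR, &ui64SecBufHandle);
+ *
+ * On success the new PMR and the secure buffer handle are returned through
+ * the last two parameters.
+ */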
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle);
+
+#endif /* _PHYSMEM_TDSECBUF_H_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ the "PMR" abstraction. A PMR (Physical Memory Resource)
+ represents some unit of physical memory which is
+ allocated/freed/mapped/unmapped as an indivisible unit
+ (higher software levels provide an abstraction above that
+ to deal with dividing this down into smaller manageable units).
+ Importantly, this module knows nothing of virtual memory, or
+ of MMUs etc., with one excusable exception. We have the
+ concept of a "page size", which really means nothing in
+ physical memory, but represents a "contiguity quantum" such
+ that the higher level modules which map this memory are able
+ to verify that it matches the needs of the page size for the
+ virtual realm into which it is being mapped.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "devicemem_server_utils.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pmr_os.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#include "lock.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+/* ourselves */
+#include "pmr.h"
+
+/* A "context" for the physical memory block resource allocator.
+
+ Context is probably the wrong word.
+
+ There is almost certainly only one of these, ever, in the system.
+ But, let's keep the notion of a context anyway, "just-in-case".
+*/
+static struct _PMR_CTX_
+{
+ /* For debugging, and PDump, etc., let's issue a forever
+ incrementing serial number to each allocation. */
+ IMG_UINT64 uiNextSerialNum;
+
+ /* For security, we only allow a PMR to be mapped if the caller
+ knows its key. We can pseudo-randomly generate keys */
+ IMG_UINT64 uiNextKey;
+
+ /* For debugging only, I guess: Number of live PMRs */
+ IMG_UINT32 uiNumLivePMRs;
+
+ /* Lock for this structure */
+ POS_LOCK hLock;
+
+ /* In order to seed the uiNextKey, we enforce initialisation at
+ driver load time. Also, we can debug check at driver unload
+ that the PMR count is zero. */
+ IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE };
+
+
+/* A PMR. One per physical allocation. May be "shared".
+
+ "shared" is ambiguous. We need to be careful with terminology.
+ There are two ways in which a PMR may be "shared" and we need to be
+ sure that we are clear which we mean.
+
+ i) multiple small allocations living together inside one PMR;
+
+ ii) one single allocation filling a PMR but mapped into multiple
+ memory contexts.
+
+ This is more important further up the stack - at this level, all we
+ care is that the PMR is being referenced multiple times.
+*/
+struct _PMR_
+{
+ /* This object is strictly refcounted. References include:
+ - mapping
+ - live handles (to this object)
+ - live export handles
+ (thus it is normal for allocated and exported memory to have a refcount of 3)
+ The object is destroyed when and only when the refcount reaches 0
+ */
+
+ /* Device node on which this PMR was created and is valid */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /*
+ Physical address translation (device <> cpu) is done on a per device
+ basis which means we need the physical heap info
+ */
+ PHYS_HEAP *psPhysHeap;
+
+ IMG_UINT32 uiRefCount;
+
+ /* lock count - this is the number of times
+ PMRLockSysPhysAddresses() has been called, less the number of
+ PMRUnlockSysPhysAddresses() calls. This is arguably here for
+ debug reasons only, as the refcount is already incremented as a
+ matter of course. Really, this just allows us to trap protocol
+	   matter of course. Really, this just allows us to trap protocol
+	   errors: i.e. calling PMRSysPhysAddr() without a lock, or calling
+	   PMRUnlockSysPhysAddresses() too many
+ IMG_UINT32 uiLockCount;
+
+ /* Lock for this structure */
+ POS_LOCK hLock;
+
+ /* Incrementing serial number to each allocation. */
+ IMG_UINT64 uiSerialNum;
+
+ /* For security, we only allow a PMR to be mapped if the caller
+ knows its key. We can pseudo-randomly generate keys */
+ PMR_PASSWORD_T uiKey;
+
+ /* Callbacks for per-flavour functions */
+ const PMR_IMPL_FUNCTAB *psFuncTab;
+
+ /* Data associated with the "subtype" */
+ PMR_IMPL_PRIVDATA pvFlavourData;
+
+ /* What kind of PMR do we have? */
+ PMR_IMPL_TYPE eFlavour;
+
+ /* And for pdump */
+ const IMG_CHAR *pszPDumpDefaultMemspaceName;
+#if defined(PDUMP)
+ /* Allocation annotation */
+ IMG_CHAR *pszAnnotation;
+
+ IMG_HANDLE hPDumpAllocHandle;
+
+ /* Whether PDumping of this PMR must be persistent
+ * (i.e. it must be present in every future PDump stream as well)
+ */
+ IMG_BOOL bForcePersistent;
+
+ IMG_UINT32 uiNumPDumpBlocks;
+#endif
+
+ /* Logical size of allocation. "logical", because a PMR can
+ represent memory that will never physically exist. This is the
+ amount of virtual space that the PMR would consume when it's
+ mapped into a virtual allocation. */
+ PMR_SIZE_T uiLogicalSize;
+
+ /* Mapping table for the allocation.
+	   PMRs can be sparse, in which case not all of the "logical" addresses
+	   in them are valid. We need to know which addresses are and aren't
+	   valid when mapping or reading the PMR.
+ The mapping table translates "logical" offsets into physical
+ offsets which is what we always pass to the PMR factory
+ (so it doesn't have to be concerned about sparseness issues) */
+ PMR_MAPPING_TABLE *psMappingTable;
+
+ /* Minimum Physical Contiguity Guarantee. Might be called "page
+ size", but that would be incorrect, as page size is something
+ meaningful only in virtual realm. This contiguity guarantee
+ provides an inequality that can be verified/asserted/whatever
+ to ensure that this PMR conforms to the page size requirement
+ of the place the PMR gets mapped. (May be used to select an
+ appropriate heap in variable page size systems)
+
+ The absolutely necessary condition is this:
+
+ device MMU page size <= actual physical contiguity.
+
+ We go one step further in order to be able to provide an early warning / early compatibility check and say this:
+
+ device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual physical contiguity.
+
+ In this way, it is possible to make the page table reservation
+ in the device MMU without even knowing the granularity of the
+ physical memory (i.e. useful for being able to allocate virtual
+ before physical)
+ */
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
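+
+	/* Editorial note (illustrative only, not part of the driver): with
+	 * uiLog2ContiguityGuarantee = 12 the PMR guarantees at least 4 KiB of
+	 * physical contiguity, so it can safely back a device MMU configured
+	 * for 4 KiB pages; a device MMU using 16 KiB pages would require a PMR
+	 * with uiLog2ContiguityGuarantee >= 14, per the inequality above. */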
+
+ /* Flags. We store a copy of the "PMR flags" (usually a subset of
+ the flags given at allocation time) and return them to any
+ caller of PMR_Flags(). The intention of these flags is that
+ the ones stored here are used to represent permissions, such
+ that no one is able to map a PMR in a mode in which they are not
+ allowed, e.g. writeable for a read-only PMR, etc. */
+ PMR_FLAGS_T uiFlags;
+
+ /* Do we really need this? For now we'll keep it, until we know we don't. */
+ /* NB: this is not the "memory context" in client terms - this is
+ _purely_ the "PMR" context, of which there is almost certainly only
+ ever one per system as a whole, but we'll keep the concept
+ anyway, just-in-case. */
+ struct _PMR_CTX_ *psContext;
+
+#if defined(PVR_RI_DEBUG)
+ /*
+ * Stored handle to PMR RI entry
+ */
+ void *hRIHandle;
+#endif
+};
+
+/* do we need a struct for the export handle? I'll use one for now, but if nothing goes in it, we'll lose it */
+struct _PMR_EXPORT_
+{
+ struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+ struct _PMR_ *psReferencePMR;
+};
+
+
+#define MIN3(a,b,c) (((a) < (b)) ? (((a) < (c)) ? (a):(c)) : (((b) < (c)) ? (b):(c)))
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ PMR **ppsPMR)
+{
+ void *pvPMRLinAddr;
+ PMR *psPMR;
+ PMR_MAPPING_TABLE *psMappingTable;
+ struct _PMR_CTX_ *psContext;
+	IMG_UINT32 i, ui32Temp = 0;
+ IMG_UINT32 ui32Remainder;
+ PVRSRV_ERROR eError;
+
+ psContext = &_gsSingletonPMRContext;
+
+
+ /* Extra checks required for sparse PMRs */
+ if (uiLogicalSize != uiChunkSize)
+ {
+ /* Check the logical size and chunk information agree with each other */
+ if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+ __FUNCTION__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ }
+
+ /* Check that the chunk size is a multiple of the contiguity */
+		OSDivide64(uiChunkSize, (1 << uiLog2ContiguityGuarantee), &ui32Remainder);
+ if (ui32Remainder)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Bad chunk size, must be a multiple of the contiguity "
+ "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+ __FUNCTION__,
+ (unsigned long long) uiChunkSize,
+ uiLog2ContiguityGuarantee));
+ return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+ }
+ }
+
+ pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+
+ if (pvPMRLinAddr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psPMR = (PMR *) pvPMRLinAddr;
+ psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+
+ eError = OSLockCreate(&psPMR->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(psPMR);
+ return eError;
+ }
+
+ /* Setup the mapping table */
+ psMappingTable->uiChunkSize = uiChunkSize;
+ psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+ psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+ OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
+ ui32NumVirtChunks);
+ for (i=0; i<ui32NumPhysChunks; i++)
+ {
+ ui32Temp = pui32MappingTable[i];
+ psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+ }
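+
+	/* Editorial note (illustrative only, not part of the driver): for a
+	 * sparse PMR with ui32NumVirtChunks = 4, ui32NumPhysChunks = 2 and
+	 * pui32MappingTable = {0, 2}, every translation entry is first filled
+	 * with 0xFF bytes (TRANSLATION_INVALID) and then entries 0 and 2 are
+	 * set to 0 and 2 respectively, leaving logical chunks 1 and 3 unbacked. */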
+
+ /* Setup the PMR */
+ psPMR->uiRefCount = 0;
+ psPMR->uiLockCount = 0;
+ psPMR->psContext = psContext;
+ psPMR->uiLogicalSize = uiLogicalSize;
+ psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+ psPMR->uiFlags = uiFlags;
+ psPMR->psMappingTable = psMappingTable;
+ psPMR->uiKey = psContext->uiNextKey;
+ psPMR->uiSerialNum = psContext->uiNextSerialNum;
+
+#if defined(PVR_RI_DEBUG)
+ psPMR->hRIHandle = NULL;
+#endif
+
+ OSLockAcquire(psContext->hLock);
+ psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+ ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
+ psContext->uiNextSerialNum ++;
+ *ppsPMR = psPMR;
+ PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+ /* Increment live PMR count */
+ psContext->uiNumLivePMRs ++;
+ OSLockRelease(psContext->hLock);
+
+ return PVRSRV_OK;
+}
+
+static IMG_UINT32
+_RefNoLock(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR->uiRefCount > 0);
+ /* We need to ensure that this function is always executed under
+ * PMRLock. The only exception acceptable is the unloading of the driver.
+ */
+ psPMR->uiRefCount++;
+ return psPMR->uiRefCount;
+}
+
+static IMG_UINT32
+_UnrefNoLock(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR->uiRefCount > 0);
+ /* We need to ensure that this function is always executed under
+ * PMRLock. The only exception acceptable is the unloading of the driver.
+ */
+ psPMR->uiRefCount--;
+ return psPMR->uiRefCount;
+}
+
+static void
+_Ref(PMR *psPMR)
+{
+ OSLockAcquire(psPMR->hLock);
+ _RefNoLock(psPMR);
+ OSLockRelease(psPMR->hLock);
+}
+
+static void
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+ PVRSRV_ERROR eError2;
+ struct _PMR_CTX_ *psCtx;
+ IMG_UINT32 uiRefCount;
+
+ PVR_ASSERT(psPMR != NULL);
+ PVR_ASSERT(psPMR->uiRefCount > 0);
+
+ OSLockAcquire(psPMR->hLock);
+ uiRefCount = _UnrefNoLock(psPMR);
+ OSLockRelease(psPMR->hLock);
+
+ if (uiRefCount == 0)
+ {
+#if defined(PDUMP)
+ PDumpPMRFreePMR(psPMR,
+ psPMR->uiLogicalSize,
+ (1 << psPMR->uiLog2ContiguityGuarantee),
+ psPMR->uiLog2ContiguityGuarantee,
+ psPMR->hPDumpAllocHandle);
+
+ OSFreeMem(psPMR->pszAnnotation);
+#endif
+
+ if (psPMR->psFuncTab->pfnFinalize != NULL)
+ {
+ eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+ PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+ }
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ OSLockAcquire(psPMR->hLock);
+ PVR_ASSERT(psPMR->uiLockCount == 0);
+ OSLockRelease(psPMR->hLock);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Delete RI entry */
+ if (psPMR->hRIHandle)
+ {
+ eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+ psCtx = psPMR->psContext;
+
+ OSLockDestroy(psPMR->hLock);
+
+ OSFreeMem(psPMR);
+
+ /* Decrement live PMR count. Probably only of interest for debugging */
+ PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+ OSLockAcquire(psCtx->hLock);
+ psCtx->uiNumLivePMRs --;
+ OSLockRelease(psCtx->hLock);
+ }
+}
+
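+/* A PMR is "sparse" unless it consists of exactly one chunk, i.e. the
+ * mapping table describes a single virtual chunk that is also the single
+ * physical chunk. */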
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+ if ((psPMR->psMappingTable->ui32NumVirtChunks == psPMR->psMappingTable->ui32NumPhysChunks) &&
+ (psPMR->psMappingTable->ui32NumVirtChunks == 1))
+ {
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
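+/*
+ * PMRCreatePMR
+ *
+ * Allocates the common PMR bookkeeping via _PMRCreate() and then fills in
+ * the factory-specific fields (device node, phys heap, function table,
+ * private data and flavour). The PMR starts with a reference count of 1;
+ * the creator is expected to drop that reference with PMRUnrefPMR() when
+ * finished with it.
+ */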
+PVRSRV_ERROR
+PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psPhysHeap,
+ PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ const PMR_IMPL_FUNCTAB *psFuncTab,
+ PMR_IMPL_PRIVDATA pvPrivData,
+ PMR_IMPL_TYPE eType,
+ PMR **ppsPMRPtr,
+ IMG_BOOL bForcePersistent)
+{
+ PMR *psPMR = NULL;
+ PVRSRV_ERROR eError;
+
+ eError = _PMRCreate(uiLogicalSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2ContiguityGuarantee,
+ uiFlags,
+ &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ psPMR->psDevNode = psDevNode;
+ psPMR->psPhysHeap = psPhysHeap;
+ psPMR->psFuncTab = psFuncTab;
+ psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+ psPMR->pvFlavourData = pvPrivData;
+ psPMR->eFlavour = eType;
+ psPMR->uiRefCount = 1;
+
+#if defined(PDUMP)
+ {
+ PMR_FLAGS_T uiFlags = psPMR->uiFlags;
+ IMG_BOOL bInitialise = IMG_FALSE;
+ IMG_UINT32 ui32InitValue = 0;
+
+ psPMR->bForcePersistent = bForcePersistent;
+
+ psPMR->pszAnnotation = OSAllocMem(sizeof(IMG_CHAR) * PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH);
+ if (psPMR->pszAnnotation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ OSStringNCopy(psPMR->pszAnnotation, pszAnnotation, PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH);
+
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ bInitialise = IMG_TRUE;
+ }
+ else if(PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+ {
+ ui32InitValue = 0xDEADBEEF;
+ bInitialise = IMG_TRUE;
+ }
+
+ PDumpPMRMallocPMR(psPMR,
+ (uiChunkSize * ui32NumVirtChunks),
+ 1ULL<<uiLog2ContiguityGuarantee,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2ContiguityGuarantee,
+ bInitialise,
+ ui32InitValue,
+ bForcePersistent,
+ &psPMR->hPDumpAllocHandle);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+#endif
+
+ *ppsPMRPtr = psPMR;
+
+ return PVRSRV_OK;
+
+ /*
+ * error exit paths follow
+ */
+#if defined(PDUMP)
+e1:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate memory for PMR PDump Annotation, OOM.", __func__));
+#endif
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+ IMG_UINT32 ui32NestingLevel)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+ /* We also count the locks as references, so that the PMR is not
+ freed while someone is using a physical address. */
+ /* "lock" here simply means incrementing the refcount. It means
+ the refcount is multipurpose, but that's okay. We only have to
+ promise that physical addresses are valid after this point, and
+ remain valid until the corresponding
+ PMRUnlockSysPhysAddressesOSMem() */
+ _RefNoLock(psPMR);
+
+ /* Also count locks separately from other types of references, to
+ allow for debug assertions */
+ psPMR->uiLockCount++;
+
+ /* Only call callback if lockcount transitions from 0 to 1 */
+ if (psPMR->uiLockCount == 1)
+ {
+ if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL)
+ {
+ /* must always have lock and unlock in pairs! */
+ PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL);
+
+ eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+ }
+ OSLockRelease(psPMR->hLock);
+
+ return PVRSRV_OK;
+
+ e1:
+ psPMR->uiLockCount--;
+ _UnrefNoLock(psPMR);
+ PVR_ASSERT(psPMR->uiRefCount != 0);
+ OSLockRelease(psPMR->hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR)
+{
+ return PMRLockSysPhysAddressesNested(psPMR, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+ return PMRUnlockSysPhysAddressesNested(psPMR, 2);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+ PVR_ASSERT(psPMR->uiLockCount > 0);
+ psPMR->uiLockCount--;
+
+ if (psPMR->uiLockCount == 0)
+ {
+ if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL)
+ {
+ PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+
+ eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+ /* must never fail */
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+
+ OSLockRelease(psPMR->hLock);
+
+ /* We also count the locks as references, so that the PMR is not
+ freed while someone is using a physical address. */
+ _UnrefAndMaybeDestroy(psPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ OSLockAcquire(psPMR->hLock);
+ /* Stop if we still have references on the PMR */
+ if ( ( bDevMapped && (psPMR->uiRefCount > 2))
+ || (!bDevMapped && (psPMR->uiRefCount > 1)) )
+ {
+ OSLockRelease(psPMR->hLock);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: PMR is still referenced %u times. "
+ "That means this PMR is probably exported or used somewhere else. "
+ "Allowed are 2 references if it is mapped to device, otherwise 1.",
+ __func__,
+ psPMR->uiRefCount));
+
+ eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+ goto e_exit;
+ }
+ OSLockRelease(psPMR->hLock);
+
+ if(psPMR->psFuncTab->pfnUnpinMem != NULL)
+ {
+ eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
+ }
+
+e_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRPinPMR(PMR *psPMR)
+{
+ PVRSRV_ERROR eError= PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ if(psPMR->psFuncTab->pfnPinMem != NULL)
+ {
+ eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
+ psPMR->psMappingTable);
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+ PMR **ppsPMR)
+{
+ PMRRefPMR(psPMR);
+ *ppsPMR = psPMR;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR)
+{
+ PMRUnrefPMR(psPMR);
+ return PVRSRV_OK;
+}
+
+/*
+ Note:
+ We pass back the PMR as it was passed in as a different handle type
+ (DEVMEM_MEM_IMPORT) and it allows us to change the import structure
+ type if we should need to embed any meta data in it.
+*/
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ _Ref(psPMR);
+
+ /* Return the PMR */
+ *ppsPMR = psPMR;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+ IMG_UINT64 *pui64UID)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ *pui64UID = psPMR->uiSerialNum;
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExportPtr,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword)
+{
+ IMG_UINT64 uiPassword;
+ PMR_EXPORT *psPMRExport;
+
+ uiPassword = psPMR->uiKey;
+
+ psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+ if (psPMRExport == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psPMRExport->psPMR = psPMR;
+ _Ref(psPMR);
+
+ *ppsPMRExportPtr = psPMRExport;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+ *puiPassword = uiPassword;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+ /* FIXME: probably shouldn't be assertions? */
+ PVR_ASSERT(psPMRExport != NULL);
+ PVR_ASSERT(psPMRExport->psPMR != NULL);
+ PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0);
+
+ _UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+ OSFreeMem(psPMRExport);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR)
+{
+ PMR *psPMR;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* FIXME: probably shouldn't be assertions? */
+ PVR_ASSERT(psPMRExport != NULL);
+ PVR_ASSERT(psPMRExport->psPMR != NULL);
+ PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0);
+
+ psPMR = psPMRExport->psPMR;
+
+ if (psPMR->psDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ if (psPMR->uiKey != uiPassword)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PMRImport: password given = %016llx, expected = %016llx\n",
+ uiPassword,
+ psPMR->uiKey));
+ return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+ }
+
+ if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+ {
+ return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+ }
+
+ _Ref(psPMR);
+
+ *ppsPMR = psPMR;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+
+ return PVRSRV_OK;
+}
+
+#else /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExportPtr,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiLog2Contig);
+ PVR_UNREFERENCED_PARAMETER(puiPassword);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMRExport);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psPMRExport);
+ PVR_UNREFERENCED_PARAMETER(uiPassword);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contig);
+ PVR_UNREFERENCED_PARAMETER(ppsPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ return PVRSRV_OK;
+}
+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ PMR *psPMR,
+ IMG_SECURE_TYPE *phSecure,
+ PMR **ppsPMR,
+ CONNECTION_DATA **ppsSecureConnection)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+ /* We are acquiring reference to PMR here because OSSecureExport
+ * releases bridge lock and PMR lock for a moment and we don't want PMR
+ * to be removed by other thread in the meantime. */
+ _Ref(psPMR);
+
+ eError = OSSecureExport(psConnection,
+ (void *) psPMR,
+ phSecure,
+ ppsSecureConnection);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *ppsPMR = psPMR;
+
+ return PVRSRV_OK;
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ _UnrefAndMaybeDestroy(psPMR);
+ return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMR;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = OSSecureImport(hSecure, (void **) &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (psPMR->psDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ _Ref(psPMR);
+
+ /* Return the PMR */
+ *ppsPMR = psPMR;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiAlign = 1 << psPMR->uiLog2ContiguityGuarantee;
+ return PVRSRV_OK;
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+ void *hRIHandle)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ psPMR->hRIHandle = hRIHandle;
+ return PVRSRV_OK;
+}
+#endif
+
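+/* Obtains a kernel-virtual mapping of (part of) the PMR through the
+ * factory's pfnAcquireKernelMappingData callback. A uiSize of 0 requests
+ * the whole PMR; otherwise the reported length is at least one contiguity
+ * block. Sparse PMRs are refused unless bMapSparse is IMG_TRUE. */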
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut,
+ IMG_BOOL bMapSparse)
+{
+ PVRSRV_ERROR eError;
+ void *pvKernelAddress;
+ IMG_HANDLE hPriv;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ if (_PMRIsSparse(psPMR) && !bMapSparse)
+ {
+ /* Generally we don't support mapping sparse allocations, but if there
+ is a justified need it can be done by passing IMG_TRUE in bMapSparse.
+ Even though the PMR supports the callback, it always maps the
+ physical memory 1:1; sparseness is handled here in the core */
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ /* Acquire/Release functions must be overridden in pairs */
+ if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL)
+ {
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL);
+
+ /* If PMR implementation does not supply this pair of
+ functions, it means they do not permit the PMR to be mapped
+ into kernel memory at all */
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ goto e0;
+ }
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ uiLogicalOffset,
+ uiSize,
+ &pvKernelAddress,
+ &hPriv,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *ppvKernelAddressOut = pvKernelAddress;
+ if (uiSize == 0)
+ {
+ /* Zero size means map the whole PMR in ...*/
+ *puiLengthOut = (size_t)psPMR->uiLogicalSize;
+ }
+ else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+ {
+ /* ... map in the requested pages ...*/
+ *puiLengthOut = uiSize;
+ }
+ else
+ {
+ /* ... otherwise we just map in one page */
+ *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+ }
+ *phPrivOut = hPriv;
+
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut)
+{
+ return _PMRAcquireKernelMappingData(psPMR,
+ uiLogicalOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ puiLengthOut,
+ phPrivOut,
+ IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut)
+{
+ return _PMRAcquireKernelMappingData(psPMR,
+ uiLogicalOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ puiLengthOut,
+ phPrivOut,
+ IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+ IMG_HANDLE hPriv)
+{
+ PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL);
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hPriv);
+
+ return PVRSRV_OK;
+}
+
+#if defined(INTEGRITY_OS)
+
+PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+ IMG_HANDLE *phMemObj,
+ IMG_HANDLE hPriv)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL);
+
+ eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+ IMG_HANDLE hPriv)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL);
+
+ eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv);
+
+ return eError;
+}
+
+#if defined(USING_HYPERVISOR)
+IMG_HANDLE PMRGetPmr(PMR *psPMR, size_t ulOffset)
+{
+ PVR_ASSERT(psPMR->psFuncTab->pfnGetPmr != NULL);
+ return psPMR->psFuncTab->pfnGetPmr(psPMR->pvFlavourData, ulOffset);
+}
+#endif
+#endif /* INTEGRITY_OS */
+
+/*
+ _PMRLogicalOffsetToPhysicalOffset
+
+ Translate between the "logical" offset which the upper levels
+ provide and the physical offset which the PMR factories work on.
+
+ As well as returning the physical offset, we return the number of
+ bytes remaining until the next chunk and whether this chunk is valid.
+
+ For multi-page operations, upper layers pass their Log2PageSize;
+ otherwise the argument is redundant (set to zero).
+*/
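+/*
+ Illustrative example (hypothetical values): with uiChunkSize = 64KB and a
+ translation table of { 0, INVALID, 1 }, a logical offset of 2*64KB + 0x10
+ lands in chunk 2 with a remainder of 0x10, giving a physical offset of
+ 1*64KB + 0x10, 64KB - 0x10 bytes remaining in the chunk and bValid set to
+ IMG_TRUE; any offset inside chunk 1 yields bValid == IMG_FALSE.
+*/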
+
+static void
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+ IMG_UINT32 *pui32BytesRemain,
+ IMG_BOOL *bValid)
+{
+ PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+ IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+ IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+ IMG_UINT64 ui64ChunkIndex;
+ IMG_UINT32 ui32Remain;
+ IMG_UINT32 idx;
+
+ /* Must be translating at least a page */
+ PVR_ASSERT(ui32NumOfPages);
+
+ if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+ {
+ /* Fast path the common case: logical and physical offsets are
+ equal, so we assume the whole ui32NumOfPages span is valid */
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+ puiPhysicalOffset[0] = uiOffset;
+ bValid[0] = IMG_TRUE;
+
+ if (ui32NumOfPages > 1)
+ {
+ /* initial offset may not be page aligned, round down */
+ uiOffset &= ~(uiPageSize-1);
+ for (idx=1; idx < ui32NumOfPages; idx++)
+ {
+ uiOffset += uiPageSize;
+ puiPhysicalOffset[idx] = uiOffset;
+ bValid[idx] = IMG_TRUE;
+ }
+ }
+ }
+ else
+ {
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ ui64ChunkIndex = OSDivide64r64(
+ uiOffset,
+ TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+ &ui32Remain);
+
+ if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+ {
+ bValid[idx] = IMG_FALSE;
+ }
+ else
+ {
+ bValid[idx] = IMG_TRUE;
+ }
+
+ if (idx == 0)
+ {
+ if (ui32Remain == 0)
+ {
+ /* Start of chunk so return the chunk size */
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+ }
+ else
+ {
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+ }
+
+ puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain;
+
+ /* initial offset may not be page aligned, round down */
+ uiOffset &= ~(uiPageSize-1);
+ }
+ else
+ {
+ puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize;
+ }
+ uiOffset += uiPageSize;
+ }
+ }
+}
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError;
+
+ if (psPMR->psFuncTab->pfnReadBytes != NULL)
+ {
+ /* defer to callback if present */
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+ uiPhysicalOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes);
+ PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ }
+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+ {
+ /* "default" handler for reading bytes */
+
+ IMG_HANDLE hKernelMappingHandle;
+ IMG_UINT8 *pcKernelAddress;
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ (size_t) uiPhysicalOffset,
+ uiBufSz,
+ (void **)&pcKernelAddress,
+ &hKernelMappingHandle,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Use the conservative 'DeviceMemCopy' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+ *puiNumBytes = uiBufSz;
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hKernelMappingHandle);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMR_ReadBytes: can't read from this PMR"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ OSPanic();
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
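+/* PMR_ReadBytes walks the requested range chunk by chunk, translating each
+ * logical offset to a physical offset; valid chunks are read through
+ * _PMR_ReadBytesPhysical() while unbacked (invalid) chunks are zero-filled
+ * and reported through the returned error code. */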
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ size_t uiBytesCopied = 0;
+
+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+ {
+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+ }
+ PVR_ASSERT(uiBufSz > 0);
+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+ /*
+ PMR implementations can override this. If they don't, a
+ "default" handler uses kernel virtual mappings. If the kernel
+ can't provide a kernel virtual mapping, this function fails
+ */
+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+ psPMR->psFuncTab->pfnReadBytes != NULL);
+
+ while (uiBytesCopied != uiBufSz)
+ {
+ IMG_UINT32 ui32Remain;
+ size_t uiBytesToCopy;
+ size_t uiRead;
+ IMG_BOOL bValid;
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+ /*
+ Copy up to either the end of the
+ chunk or the end of the buffer
+ */
+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+ if (bValid)
+ {
+ /* Read the data from the PMR */
+ eError = _PMR_ReadBytesPhysical(psPMR,
+ uiPhysicalOffset,
+ &pcBuffer[uiBytesCopied],
+ uiBytesToCopy,
+ &uiRead);
+ if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ uiRead,
+ uiBytesToCopy));
+ /* Bail out as soon as we hit an error */
+ break;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")",
+ __FUNCTION__,
+ uiLogicalOffset,
+ psPMR->uiLogicalSize));
+ /* Fill invalid chunks with 0 */
+ OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+ uiRead = uiBytesToCopy;
+ eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR;
+ }
+ uiLogicalOffset += uiRead;
+ uiBytesCopied += uiRead;
+ }
+
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError;
+
+ if (psPMR->psFuncTab->pfnWriteBytes != NULL)
+ {
+ /* defer to callback if present */
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+ uiPhysicalOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes);
+ PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ }
+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+ {
+ /* "default" handler for reading bytes */
+
+ IMG_HANDLE hKernelMappingHandle;
+ IMG_UINT8 *pcKernelAddress;
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ (size_t) uiPhysicalOffset,
+ uiBufSz,
+ (void **)&pcKernelAddress,
+ &hKernelMappingHandle,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Use the conservative 'DeviceMemCopy' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+ *puiNumBytes = uiBufSz;
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hKernelMappingHandle);
+ }
+ else
+ {
+ /*
+ The write callback is optional as it's only required by the debug
+ tools
+ */
+ PVR_DPF((PVR_DBG_ERROR, "_PMR_WriteBytesPhysical: can't write to this PMR"));
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ OSPanic();
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
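+/* PMR_WriteBytes mirrors PMR_ReadBytes: valid chunks are written through
+ * _PMR_WriteBytesPhysical(), while writes to unbacked (invalid) chunks are
+ * silently ignored. */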
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ size_t uiBytesCopied = 0;
+
+ /* FIXME: When we honour CPU mapping flags remove the #if 0*/
+ #if 0
+ if (!PVRSRV_CHECK_CPU_WRITEABLE(psPMR->uiFlags))
+ {
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+ #endif
+
+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+ {
+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+ }
+ PVR_ASSERT(uiBufSz > 0);
+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+ /*
+ PMR implementations can override this. If they don't, a
+ "default" handler uses kernel virtual mappings. If the kernel
+ can't provide a kernel virtual mapping, this function fails
+ */
+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+ psPMR->psFuncTab->pfnWriteBytes != NULL);
+
+ while (uiBytesCopied != uiBufSz)
+ {
+ IMG_UINT32 ui32Remain;
+ size_t uiBytesToCopy;
+ size_t uiWrite;
+ IMG_BOOL bValid;
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+
+ /*
+ Copy up to either the end of the
+ chunk or the end of the buffer
+ */
+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+ if (bValid)
+ {
+ /* Write the data to the PMR */
+ eError = _PMR_WriteBytesPhysical(psPMR,
+ uiPhysicalOffset,
+ &pcBuffer[uiBytesCopied],
+ uiBytesToCopy,
+ &uiWrite);
+ if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to read chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ uiWrite,
+ uiBytesToCopy));
+ /* Bail out as soon as we hit an error */
+ break;
+ }
+ }
+ else
+ {
+ /* Ignore writes to invalid pages */
+ uiWrite = uiBytesToCopy;
+ }
+ uiLogicalOffset += uiWrite;
+ uiBytesCopied += uiWrite;
+ }
+
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+ if (psPMR->psFuncTab->pfnMMap)
+ {
+ return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
+ }
+
+ return OSMMapPMRGeneric(psPMR, pOSMMapData);
+}
+
+void
+PMRRefPMR(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ _Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+
+extern PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR)
+{
+ PMRUnlockSysPhysAddresses(psPMR);
+
+ PMRUnrefPMR(psPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->psDevNode;
+}
+
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->uiFlags;
+}
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return _PMRIsSparse(psPMR);
+
+}
+
+PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ *puiLogicalSize = psPMR->uiLogicalSize;
+ return PVRSRV_OK;
+}
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR)
+{
+ return psPMR->psPhysHeap;
+}
+
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_BOOL *pbValid)
+{
+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+ IMG_UINT32 *pui32BytesRemain = aui32BytesRemain;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+ PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset);
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+ if (puiPhysicalOffset == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32));
+ if (pui32BytesRemain == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ uiLogicalOffset,
+ puiPhysicalOffset,
+ pui32BytesRemain,
+ pbValid);
+
+e0:
+ if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL)
+ {
+ OSFreeMem(puiPhysicalOffset);
+ }
+
+ if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL)
+ {
+ OSFreeMem(pui32BytesRemain);
+ }
+
+ return eError;
+}
+
+PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->psMappingTable;
+
+}
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->uiLog2ContiguityGuarantee;
+}
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->eFlavour;
+}
+
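+/*
+ * Sketch of the expected calling sequence when translating a PMR range to
+ * device physical pages (mirroring what PMRWritePMPageList() does below):
+ *
+ *   PMRLockSysPhysAddresses(psPMR);
+ *   PMR_DevPhysAddr(psPMR, uiLog2PageSize, uiNumPages, 0,
+ *                   asDevPAddr, abValid);
+ *   ...use the addresses...
+ *   PMRUnlockSysPhysAddresses(psPMR);
+ */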
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEV_PHYADDR *psDevAddrPtr,
+ IMG_BOOL *pbValid)
+{
+ IMG_UINT32 ui32Remain;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+ PVR_ASSERT(psPMR != NULL);
+ PVR_ASSERT(ui32NumOfPages > 0);
+ PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ OSLockAcquire(psPMR->hLock);
+ PVR_ASSERT(psPMR->uiLockCount > 0);
+ OSLockRelease(psPMR->hLock);
+#endif
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+ if (puiPhysicalOffset == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ uiLogicalOffset,
+ puiPhysicalOffset,
+ &ui32Remain,
+ pbValid);
+ if (*pbValid || _PMRIsSparse(psPMR))
+ {
+ /* Sparse PMR may not always have the first page valid */
+ eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ puiPhysicalOffset,
+ pbValid,
+ psDevAddrPtr);
+#if defined(PLATO_MEMORY_CONFIG)
+ /* Currently excluded from the default build because of performance concerns.
+ *
+ * We do not need this part in other systems because the GPU has the same address view of system RAM as the CPU.
+ * Ideally this should be directly part of the PMR factories and they should always return
+ * the device physical address and not CPU physical or even better they should return the
+ * DMA bus address. */
+
+ if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA)
+ {
+ IMG_DEV_PHYADDR *psDevPAddrCorrected = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddrCorrected == NULL)
+ {
+ if (puiPhysicalOffset != auiPhysicalOffset)
+ {
+ OSFreeMem(puiPhysicalOffset);
+ }
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+ ui32NumOfPages,
+ psDevPAddrCorrected,
+ (IMG_CPU_PHYADDR *) psDevAddrPtr);
+
+ /* Copy the translated addresses to the correct array */
+ memcpy(psDevAddrPtr, psDevPAddrCorrected, ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+
+ OSFreeMem(psDevPAddrCorrected);
+ }
+#endif
+ }
+
+ if (puiPhysicalOffset != auiPhysicalOffset)
+ {
+ OSFreeMem(puiPhysicalOffset);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_CPU_PHYADDR *psCpuAddrPtr,
+ IMG_BOOL *pbValid)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages,
+ uiLogicalOffset, psDevPAddr, pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ OSFreeMem(psDevPAddr);
+ }
+
+ return PVRSRV_OK;
+e1:
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(psDevPAddr);
+ }
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags)
+{
+ PVRSRV_ERROR eError;
+
+ if (NULL == psPMR->psFuncTab->pfnChangeSparseMem)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This type of sparse PMR cannot be changed.",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
+ psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#if defined(PDUMP)
+ {
+ IMG_BOOL bInitialise = IMG_FALSE;
+ IMG_UINT32 ui32InitValue = 0;
+
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ bInitialise = IMG_TRUE;
+ }
+ else if(PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+ {
+ ui32InitValue = 0xDEADBEEF;
+ bInitialise = IMG_TRUE;
+ }
+
+ PDumpPMRChangeSparsePMR(psPMR,
+ 1 << psPMR->uiLog2ContiguityGuarantee,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ bInitialise,
+ ui32InitValue,
+ &psPMR->hPDumpAllocHandle);
+ }
+
+#endif
+
+e0:
+ return eError;
+}
+
+
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ PVRSRV_ERROR eError;
+
+ if ((NULL == psPMR->psFuncTab) ||
+ (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This type of sparse PMR cannot be changed.",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData,
+ psPMR,
+ sCpuVAddrBase,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices);
+
+ return eError;
+}
+
+
+
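+/*
+ * PDump support: the routines below translate PMR logical offsets into
+ * PDump symbolic addresses and emit the corresponding script commands
+ * (MALLOC/FREE, WRW, LDB, SAB, POL and CBP) for offline replay.
+ */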
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *pszMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ if (DevmemCPUCacheCoherency(psPMR->psDevNode, psPMR->uiFlags) ||
+ DevmemDeviceCacheCoherency(psPMR->psDevNode, psPMR->uiFlags))
+ {
+ OSSNPrintf(pszMemspaceName,
+ ui32MemspaceNameLen,
+ PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC,
+ psPMR->pszPDumpDefaultMemspaceName);
+ }
+ else
+ {
+ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+ psPMR->pszPDumpDefaultMemspaceName);
+ }
+
+ OSSNPrintf(pszSymbolicAddr,
+ ui32SymbolicAddrLen,
+ PMR_SYMBOLICADDR_FMTSPEC,
+ PMR_DEFAULT_PREFIX,
+ psPMR->uiSerialNum,
+ uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
+ psPMR->pszAnnotation ? psPMR->pszAnnotation : "");
+ PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr));
+
+
+ *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1);
+ *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1)
+ << PMR_GetLog2Contiguity(psPMR));
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *pszMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName
+ )
+{
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ IMG_UINT32 ui32Remain;
+ IMG_BOOL bValid;
+
+ PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+
+ if (!bValid)
+ {
+ /* We should never be asked for the symbolic address of an invalid chunk */
+ PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Invalid chunk (PVRSRV_ERROR_PMR_INVALID_CHUNK)? Possibly sparse memory");
+ /* For sparse allocations a given logical address may have no physical
+ * backing even though the virtual range is still valid.
+ */
+ uiPhysicalOffset = uiLogicalOffset;
+ }
+
+ return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+ uiPhysicalOffset,
+ ui32MemspaceNameLen,
+ pszMemspaceName,
+ ui32SymbolicAddrLen,
+ pszSymbolicAddr,
+ puiNewOffset,
+ puiNextSymName);
+}
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ * dword write to a physical allocation. Size is always
+ * sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+ PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize);
+ /* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+ < uiPMRPageSize));
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the PMR */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW32(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ ui32Value,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ * qword (64-bit) write to a physical allocation. Size is always
+ * sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+
+ PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize);
+ /* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value))
+ < uiPMRPageSize));
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the PMR */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW64(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ ui64Value,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOutOffset;
+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+ const IMG_CHAR *pszParamStreamFileName;
+ PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+
+ /* required when !bZero */
+ #define PMR_MAX_PDUMP_BUFSZ (1<<14)
+ IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME];
+ IMG_UINT8 *pcBuffer = NULL;
+ size_t uiBufSz;
+ size_t uiNumBytes;
+ IMG_BOOL bValid;
+
+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+ /* Get the correct PDump stream file name */
+ if(bZero)
+ {
+ /* Check if this PMR needs to be persistent:
+ * If the allocation is persistent then it will be present in every
+ * pdump stream after its allocation. We must ensure the zeroing is also
+ * persistent so that every PDump MALLOC is accompanied by the initialisation
+ * to zero.
+ */
+ if(psPMR->bForcePersistent)
+ {
+ uiPDumpFlags = PDUMP_FLAGS_PERSISTENT;
+ }
+
+ PDumpCommentWithFlags(uiPDumpFlags,
+ "Zeroing allocation (%llu bytes)",
+ (unsigned long long) uiSize);
+
+ /* get the zero page information. it is constant for this function */
+ PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset,
+ &uiBufSz,
+ &pszParamStreamFileName);
+ }
+ else
+ {
+
+ uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR);
+ PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ);
+
+ pcBuffer = OSAllocMem(uiBufSz);
+
+ PVR_LOGR_IF_NOMEM(pcBuffer, "OSAllocMem");
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ pszParamStreamFileName = aszParamStreamFilename;
+ }
+
+ /* Loop over all touched symbolic addresses of the PMR and
+ * emit LDBs to load the contents. */
+ while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+ {
+ /* Get the correct symbolic name for the current offset */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiCurrentOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOutOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz);
+
+ PMR_IsOffsetValid(psPMR,
+ 0,
+ 1,
+ uiCurrentOffset,
+ &bValid);
+
+ /* Either just LDB the zeros or read from the PMR and store that
+ * in the pdump stream */
+ if (bValid)
+ {
+ if(bZero)
+ {
+ uiNumBytes = MIN(uiSize, uiNextSymName - uiCurrentOffset);
+ }
+ else
+ {
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+ uiLogicalOffset + uiSize - uiCurrentOffset :
+ uiNextSymName - uiCurrentOffset);
+
+ eError = PMR_ReadBytes(psPMR,
+ uiCurrentOffset,
+ pcBuffer,
+ uiReadOffset,
+ &uiNumBytes);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpWriteBuffer(pcBuffer,
+ uiNumBytes,
+ uiPDumpFlags,
+ &aszParamStreamFilename[0],
+ sizeof(aszParamStreamFilename),
+ &uiParamStreamFileOffset);
+ if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ /* Write to parameter file prevented under the flags and
+ * current state of the driver so skip further writes.
+ */
+ eError = PVRSRV_OK;
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file");
+ }
+ }
+
+ /* Emit the LDB command to the current symbolic address*/
+ eError = PDumpPMRLDB(aszMemspaceName,
+ aszSymbolicName,
+ uiOutOffset,
+ uiNumBytes,
+ pszParamStreamFileName,
+ uiParamStreamFileOffset,
+ uiPDumpFlags);
+ }
+ uiCurrentOffset = uiNextSymName;
+ }
+
+ if (!bZero)
+ {
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ OSFreeMem(pcBuffer);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOutOffset;
+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+ while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+ {
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiCurrentOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOutOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize);
+
+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+ uiLogicalOffset + uiSize - uiCurrentOffset :
+ uiNextSymName - uiCurrentOffset);
+
+ eError = PDumpPMRSAB(aszMemspaceName,
+ aszSymbolicName,
+ uiOutOffset,
+ uiReadOffset,
+ pszFilename,
+ uiFileOffset);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ uiCurrentOffset = uiNextSymName;
+ }
+
+ return PVRSRV_OK;
+}
+
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+ /* Make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+ < uiPMRPageSize));
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpOffset,
+ &uiNextSymName);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#define _MEMPOLL_DELAY (1000)
+#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY)
+
+ eError = PDumpPMRPOL(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ _MEMPOLL_COUNT,
+ _MEMPOLL_DELAY,
+ uiPDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiReadOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpOffset,
+ &uiNextSymName);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = PDumpPMRCBP(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle;
+
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 i, uiIndex;
+
+ /* Remove pages from the PMR */
+ for (i = 0; i < ui32FreePageCount; i++)
+ {
+ uiIndex = pai32FreeIndices[i];
+
+ eError = PDumpFree(phPDumpAllocInfo[uiIndex]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ phPDumpAllocInfo[uiIndex] = NULL;
+ }
+
+ /* Add new pages to the PMR */
+ for (i = 0; i < ui32AllocPageCount; i++)
+ {
+ uiIndex = pai32AllocIndices[i];
+
+ PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL);
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiIndex * uiBlockSize,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpMalloc(aszMemspaceName,
+ aszSymbolicName,
+ uiBlockSize,
+ uiBlockSize,
+ bInitialise,
+ ui32InitValue,
+ psPMR->bForcePersistent,
+ &phPDumpAllocInfo[uiIndex]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+
+IMG_INTERNAL void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+
+ /* (IMG_HANDLE*) <- (IMG_HANDLE) */
+ IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+
+ for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
+ {
+ if (ahPDumpAllocHandleArray[i] != NULL)
+ {
+ eError = PDumpFree(ahPDumpAllocHandleArray[i]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ahPDumpAllocHandleArray[i] = NULL;
+ }
+ }
+
+ OSFreeMem(ahPDumpAllocHandleArray);
+}
+
+
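+/* Emits one PDump MALLOC per contiguity block of the PMR. For sparse PMRs
+ * only the physically backed blocks (per the mapping table) are allocated,
+ * but the handle array is sized for every virtual block so that
+ * PDumpPMRChangeSparsePMR() can later add or free individual blocks. */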
+IMG_INTERNAL void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phPDumpAllocInfoOut)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE *phPDumpAllocInfo;
+
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiNumPhysBlocks;
+ IMG_UINT32 uiNumVirtBlocks;
+ IMG_UINT32 i, uiIndex;
+
+
+ if (PMR_IsSparse(psPMR))
+ {
+ uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity;
+ /* Make sure we did not cut off anything */
+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks));
+ }
+ else
+ {
+ uiNumPhysBlocks = uiSize >> uiLog2Contiguity;
+ /* Make sure we did not cut off anything */
+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize);
+ }
+
+ uiNumVirtBlocks = uiSize >> uiLog2Contiguity;
+ PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize);
+
+ psPMR->uiNumPDumpBlocks = uiNumVirtBlocks;
+
+ phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+
+
+ for (i = 0; i < uiNumPhysBlocks; i++)
+ {
+ uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiIndex * uiBlockSize,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpMalloc(aszMemspaceName,
+ aszSymbolicName,
+ uiBlockSize,
+ uiBlockSize,
+ bInitialise,
+ ui32InitValue,
+ bForcePersistent,
+ &phPDumpAllocInfo[uiIndex]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+
+}
+#endif /* PDUMP */
+
+
+
+
+/*
+ FIXME: Find a better way to do this
+ */
+
+void *PMRGetPrivateDataHack(const PMR *psPMR,
+ const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+ return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL;
+}
+
+
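+/*
+ * PMRWritePMPageList
+ *
+ * Writes into psPageListPMR, starting at uiTableOffset, one word per page
+ * of psReferencePMR containing that page's device physical address shifted
+ * right by uiLog2PageSize (see the PDump and NO_HARDWARE paths below). The
+ * reference PMR's physical addresses are locked via
+ * PMRLockSysPhysAddresses() before the table is populated.
+ */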
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+ PMR *psPageListPMR,
+ IMG_DEVMEM_OFFSET_T uiTableOffset,
+ IMG_DEVMEM_SIZE_T uiTableLength,
+ /* Referenced PMR, and "page" granularity */
+ PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+ PMR_PAGELIST **ppsPageList)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiWordSize;
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags;
+ PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+ IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+ IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+ IMG_UINT64 uiPageListPMRPage = 0;
+ IMG_UINT64 uiPrevPageListPMRPage = 0;
+ IMG_HANDLE hPrivData = NULL;
+ void *pvKernAddr = NULL;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *pasDevAddrPtr;
+ IMG_UINT32 *pui32DataPtr = NULL;
+ IMG_BOOL *pbPageIsValid;
+#endif
+ /* FIXME: should this be configurable? */
+ uiWordSize = 4;
+
+ /* check we're being asked to write the same number of 4-byte units as there are pages */
+ uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+ {
+ /* Strictly speaking, it's possible to provoke this error in two ways:
+ (i) if it's not a whole multiple of the page size; or
+ (ii) if there are more than 4 billion pages.
+ The latter is unlikely :), but the check is required in order to justify the cast.
+ */
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto e0;
+ }
+ uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+ if (uiNumPages * uiWordSize != uiTableLength)
+ {
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto e0;
+ }
+
+ /* Check we're not being asked to write off the end of the PMR */
+ if (uiTableOffset + uiTableLength > psPageListPMR->uiLogicalSize)
+ {
+ /* table memory insufficient to store addresses for the whole block */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /* the PMR into which we are writing must not be user CPU mappable: */
+ if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "masked flags = 0x%08x", (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+ PVR_DPF((PVR_DBG_ERROR, "Page list PMR allows CPU mapping (0x%08x)", uiFlags));
+ eError = PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS;
+ goto e0;
+ }
+
+ if (_PMRIsSparse(psPageListPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PageList PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ if (_PMRIsSparse(psReferencePMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Reference PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+ if (psPageList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ psPageList->psReferencePMR = psReferencePMR;
+
+ /* Need to lock down the physical addresses of the reference PMR */
+ /* N.B. This also checks that the requested "contiguity" is achievable */
+ eError = PMRLockSysPhysAddresses(psReferencePMR);
+ if(eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if !defined(NO_HARDWARE)
+ if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+ if (pasDevAddrPtr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+
+ pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+ if (pbPageIsValid == NULL)
+ {
+ /* Clean-up before exit */
+ OSFreeMem(pasDevAddrPtr);
+
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+ }
+ else
+ {
+ pasDevAddrPtr = asDevPAddr;
+ pbPageIsValid = abValid;
+ }
+
+
+ eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+ pasDevAddrPtr, pbPageIsValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+ goto e3;
+ }
+#endif
+
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+#if defined(PDUMP)
+ eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+ uiPMROffset,
+ sizeof(aszTableEntryMemspaceName),
+ &aszTableEntryMemspaceName[0],
+ sizeof(aszTableEntrySymbolicName),
+ &aszTableEntrySymbolicName[0],
+ &uiTableEntryPDumpOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+ sizeof(aszPageMemspaceName),
+ &aszPageMemspaceName[0],
+ sizeof(aszPageSymbolicName),
+ &aszPageSymbolicName[0],
+ &uiPagePDumpOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpWriteShiftedMaskedValue(/* destination */
+ aszTableEntryMemspaceName,
+ aszTableEntrySymbolicName,
+ uiTableEntryPDumpOffset,
+ /* source */
+ aszPageMemspaceName,
+ aszPageSymbolicName,
+ uiPagePDumpOffset,
+ /* shift right */
+ uiLog2PageSize,
+ /* shift left */
+ 0,
+ /* mask */
+ 0xffffffff,
+ /* word size */
+ uiWordSize,
+ /* flags */
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_ASSERT(eError == PVRSRV_OK);
+#else
+ PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+#if !defined(NO_HARDWARE)
+
+ /*
+ Sparse PMRs are rejected at function entry, but since we can,
+ also check that every page is valid
+ */
+ PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+ PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+ uiPageListPMRPage = uiPMROffset >> psPageListPMR->uiLog2ContiguityGuarantee;
+
+ if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+ {
+ size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+ size_t uiMappedSize;
+
+ /* If we already had a page list mapped, we need to unmap it... */
+ if (pui32DataPtr != NULL)
+ {
+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+ }
+
+ eError = PMRAcquireKernelMappingData(psPageListPMR,
+ uiMappingOffset,
+ uiPageListPageSize,
+ &pvKernAddr,
+ &uiMappedSize,
+ &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%llu) into kernel (%d)",
+ uiPageListPMRPage, eError));
+ goto e3;
+ }
+
+ uiPrevPageListPMRPage = uiPageListPMRPage;
+ PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+ PVR_ASSERT(pvKernAddr != NULL);
+
+ pui32DataPtr = (IMG_UINT32 *) (((IMG_CHAR *) pvKernAddr) + (uiPMROffset & (uiPageListPageSize - 1)));
+ }
+
+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+ /* Write the physical page index into the page list PMR */
+ *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+
+ /* Last page so unmap */
+ if (uiPageIndex == (uiNumPages - 1))
+ {
+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+ }
+#endif
+ }
+
+#if !defined(NO_HARDWARE)
+ if (pasDevAddrPtr != asDevPAddr)
+ {
+ OSFreeMem(pbPageIsValid);
+ OSFreeMem(pasDevAddrPtr);
+ }
+#endif
+ *ppsPageList = psPageList;
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+#if !defined(NO_HARDWARE)
+e3:
+ if (pasDevAddrPtr != asDevPAddr)
+ {
+ OSFreeMem(pbPageIsValid);
+ OSFreeMem(pasDevAddrPtr);
+ }
+ e2:
+ PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+ e1:
+ OSFreeMem(psPageList);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+PVRSRV_ERROR /* FIXME: should be void */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+ PVRSRV_ERROR eError2;
+
+ eError2 = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ OSFreeMem(psPageList);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+ IMG_HANDLE hPrivData = NULL;
+ void *pvKernAddr = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ size_t uiMappedSize;
+
+ PVR_ASSERT(psPMR);
+
+ /* Calculate number of pages in this PMR */
+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+ /* Verify the logical size is a multiple of the physical page size */
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is not a multiple of %u",ui32PageSize));
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto MultiPage_Error;
+ }
+
+ if (_PMRIsSparse(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Sparse_Error;
+ }
+
+ /* Scan through all pages of the PMR */
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ /* map the physical page (for a given PMR offset) into kernel space */
+ eError = PMRAcquireKernelMappingData(psPMR,
+ (size_t)uiPageIndex << uiLog2PageSize,
+ ui32PageSize,
+ &pvKernAddr,
+ &uiMappedSize,
+ &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: AcquireKernelMapping failed with error %u", eError));
+ goto AcquireKernelMapping_Error;
+ }
+
+ /* ensure the mapped page size is the same as the physical page size */
+ if (uiMappedSize != ui32PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: Physical Page size = 0x%08x, Size of Mapping = 0x%016llx",
+ ui32PageSize,
+ (IMG_UINT64)uiMappedSize));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto MappingSize_Error;
+ }
+
+ /* Use the conservative 'DeviceMemSet' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
+
+ /* release mapping */
+ PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"PMRZeroingPMR: Zeroing PMR %p done (num pages %u, page size %u)",
+ psPMR,
+ uiNumPages,
+ ui32PageSize));
+
+ return PVRSRV_OK;
+
+
+ /* Error handling */
+
+MappingSize_Error:
+ PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+AcquireKernelMapping_Error:
+Sparse_Error:
+MultiPage_Error:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+ IMG_DEV_PHYADDR sDevAddrPtr;
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ IMG_BOOL bPageIsValid;
+ IMG_UINT32 ui32Col = 16;
+ IMG_UINT32 ui32SizePerCol = 11;
+ IMG_UINT32 ui32ByteCount = 0;
+ IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Get number of pages */
+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+ /* Verify the logical size is a multiple of the physical page size */
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is not a multiple of %u", 1 << uiLog2PageSize));
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto MultiPage_Error;
+ }
+
+ if (_PMRIsSparse(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Sparse_Error;
+ }
+
+ PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
+
+ /* Print the address of the physical pages */
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ /* Get Device physical Address */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2PageSize,
+ 1,
+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+ &sDevAddrPtr,
+ &bPageIsValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR %p failed to get DevPhysAddr with error %u",
+ psPMR,
+ eError));
+ goto DevPhysAddr_Error;
+ }
+
+ ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+ PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+ if (uiPageIndex % ui32Col == ui32Col - 1)
+ {
+ PVR_LOG((" Phys Page: %s", pszBuffer));
+ ui32ByteCount = 0;
+ }
+ }
+ if (ui32ByteCount > 0)
+ {
+ PVR_LOG((" Phys Page: %s", pszBuffer));
+ }
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+DevPhysAddr_Error:
+Sparse_Error:
+MultiPage_Error:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ if (_gsSingletonPMRContext.bModuleInitialised)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: oops, already initialized"));
+ return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ }
+
+ eError = OSLockCreate(&_gsSingletonPMRContext.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ _gsSingletonPMRContext.uiNextSerialNum = 1;
+
+ _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext;
+
+ _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+ _gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRDeInit(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_ASSERT(_gsSingletonPMRContext.bModuleInitialised);
+ if (!_gsSingletonPMRContext.bModuleInitialised)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: oops, not initialized"));
+ return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ }
+
+ PVR_ASSERT(_gsSingletonPMRContext.uiNumLivePMRs == 0);
+ if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: %d live PMR(s) remain(s)", _gsSingletonPMRContext.uiNumLivePMRs));
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: This is an unrecoverable error; a subsequent crash is inevitable"));
+ return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ }
+
+ OSLockDestroy(_gsSingletonPMRContext.hLock);
+
+ _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ the "PMR" abstraction. A PMR (Physical Memory Resource)
+ represents some unit of physical memory which is
+ allocated/freed/mapped/unmapped as an indivisible unit
+ (higher software levels provide an abstraction above that
+ to deal with dividing this down into smaller manageable units).
+ Importantly, this module knows nothing of virtual memory, or
+ of MMUs etc., with one excusable exception. We have the
+ concept of a "page size", which really means nothing in
+ physical memory, but represents a "contiguity quantum" such
+ that the higher level modules which map this memory are able
+ to verify that it matches the needs of the page size for the
+ virtual realm into which it is being mapped.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_H_
+#define _SRVSRV_PMR_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "physheap.h"
+
+#define PMR_MAX_TRANSLATION_STACK_ALLOC (32)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010llX"
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016llX"
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+struct _PMR_MAPPING_TABLE_
+{
+ PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */
+ IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */
+ IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */
+ /* Must be last */
+ IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */
+};
+
+#define TRANSLATION_INVALID 0xFFFFFFFFUL
+#define INVALID_PAGE 0ULL
+
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+typedef struct _CONNECTION_DATA_ CONNECTION_DATA;
+typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_NODE;
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee"
+ *
+ * Flags are also set at this time. (T.B.D. flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped. Note that this does not have to be the same
+ * as the actual physical size of the memory. For example, consider
+ * the sparsely allocated non-power-of-2 texture case. In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture. That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called. Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this. But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ * Called when someone requests that physical pages be
+ * locked down via the PMRLockSysPhysAddresses() API. Note
+ * that if physical pages are prefaulted at PMR creation time and
+ * therefore static, it would not be necessary to override this
+ * function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ * The reverse of pfnLockPhysAddresses. Note that this should be
+ * NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ * This function is mandatory. This is the one which returns the
+ * system physical address for a given offset into this PMR. The
+ * "lock" function will have been called, if overridden, before
+ * this function, thus the implementation should not increase any
+ * refcount when answering this call. Refcounting, if necessary,
+ * should be done in the lock/unlock calls. Refcounting would
+ * not be necessary in the prefaulted/static scenario, as the
+ * pmr.c abstraction will handle the refcounting for the whole
+ * PMR.
+ *
+ * pfnFinalize
+ *
+ * Called when the PMR's refcount reaches zero and it gets
+ * destroyed. This allows the implementation to free up any
+ * resource acquired during creation time.
+ *
+ */
+extern PVRSRV_ERROR
+PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psPhysHeap,
+ PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ const PMR_IMPL_FUNCTAB *psFuncTab,
+ PMR_IMPL_PRIVDATA pvPrivData,
+ PMR_IMPL_TYPE eType,
+ PMR **ppsPMRPtr,
+ IMG_BOOL bForcePersistent);
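+
+/*
+ * Illustrative sketch only (an assumption, not a call site added by this
+ * patch): a PMR factory creating a simple, non-sparse allocation consisting
+ * of a single physical chunk might invoke PMRCreatePMR() roughly as below.
+ * gsMyFuncTab, pvMyPrivData, uiSize, uiLog2PageSize and uiFlags are
+ * hypothetical locals of the factory.
+ *
+ *     IMG_UINT32 ui32MappingTable = 0;
+ *     PMR *psPMR;
+ *
+ *     eError = PMRCreatePMR(psDevNode, psPhysHeap,
+ *                           uiSize, uiSize,
+ *                           1, 1,
+ *                           &ui32MappingTable,
+ *                           uiLog2PageSize,
+ *                           uiFlags,
+ *                           "MyAllocation",
+ *                           &gsMyFuncTab, pvMyPrivData,
+ *                           PMR_TYPE_OSMEM,
+ *                           &psPMR,
+ *                           IMG_FALSE);
+ */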
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical addresses of the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the
+ * information-getting APIs such as PMR_SysPhysAddr()
+ * [ see note below about lock/unlock semantics ]
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented; it only has to know that it is allowed access to
+ * the physical addresses _after_ calling this function and _until_
+ * calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory
+ * exists for the lifetime of the PMR, with a static address, (and
+ * normally flags and symbolic address are static too) and so it is
+ * legal for a PMR implementation to not provide an implementation for
+ * the lock callback.
+ *
+ * Some PMR implementations may wish to page memory in from secondary
+ * storage on demand. The lock/unlock callbacks _may_ be the place to
+ * do this. (more likely, there would be a separate API for doing
+ * this, but this API provides a useful place to assert that it has
+ * been done)
+ */
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+ IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
+
+
+/**************************************************************************/ /*!
+@Function PMRUnpinPMR
+@Description This is the counterpart to PMRPinPMR(). It is meant to be
+ called before repinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psPMR The physical memory to unpin.
+
+@Input bDevMapped A flag that indicates if this PMR has been
+ mapped to device virtual space.
+ Needed to check if this PMR is allowed to be
+ unpinned or not.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
+
+/**************************************************************************/ /*!
+@Function PMRPinPMR
+@Description This is the counterpart to PMRUnpinPMR(). It is meant to be
+ called after unpinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
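+
+/*
+ * Unpin/repin sketch (an assumption, for illustration only): callers must
+ * be prepared for the case where the original content could not be
+ * preserved across the unpin.
+ *
+ *     eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+ *     ...
+ *     eError = PMRPinPMR(psPMR);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         ... content was lost, reinitialise the allocation ...
+ *     }
+ */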
+
+
+/*
+ * PhysmemPMRExport()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existence
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists. The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes. The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PhysmemPMRImport(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMR's secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B. If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+extern PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExport,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword);
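+
+/*
+ * Export/import lifecycle sketch (an assumption, for illustration only; the
+ * IPC used to pass the export handle and password to the other process is
+ * omitted). PMRImportPMR() and PMRUnimportPMR() are declared further down
+ * this header.
+ *
+ *     eError = PMRExportPMR(psPMR, &psPMRExport,
+ *                           &uiSize, &uiLog2Contig, &uiPassword);
+ *
+ *     eError = PMRImportPMR(psConnection, psDevNode, psPMRExport,
+ *                           uiPassword, uiSize, uiLog2Contig,
+ *                           &psImportedPMR);
+ *
+ *     eError = PMRUnimportPMR(psImportedPMR);
+ *     eError = PMRUnexportPMR(psPMRExport);
+ */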
+
+/*!
+*******************************************************************************
+
+ @Function PMRMakeLocalImportHandle
+
+ @Description
+
+ Transform a general handle type into one that we are able to import.
+ Takes a PMR reference.
+
+ @Input psPMR The input PMR.
+ @Output ppsPMR The output PMR that is going to be transformed to the
+ correct handle type.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+ PMR **ppsPMR);
+
+/*!
+*******************************************************************************
+
+ @Function PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ Counterpart to PMRMakeLocalImportHandle().
+
+ @Input psPMR PMR to destroy.
+ Created by PMRMakeLocalImportHandle().
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR(). This causes the PMR to no
+ * longer be exported. If the PMR has already been imported, the
+ * imported PMR reference will still be valid, but no further imports
+ * will be possible.
+ */
+extern PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B. If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+extern PVRSRV_ERROR
+PMRImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+extern PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode - TODO: should
+ * unify this and the PMRAcquireMMapArgs API with a suitable
+ * abstraction
+ */
+extern PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+ IMG_HANDLE hPriv);
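+
+/*
+ * Kernel mapping usage sketch (an assumption, for illustration only; see
+ * PMRZeroingPMR() in pmr.c for an in-tree caller following the same
+ * pattern):
+ *
+ *     void *pvKernAddr;
+ *     size_t uiMappedSize;
+ *     IMG_HANDLE hPriv;
+ *
+ *     eError = PMRAcquireKernelMappingData(psPMR, 0, uiSize,
+ *                                          &pvKernAddr, &uiMappedSize,
+ *                                          &hPriv);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         OSDeviceMemSet(pvKernAddr, 0, uiMappedSize);
+ *         PMRReleaseKernelMappingData(psPMR, hPriv);
+ *     }
+ */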
+
+#if defined(INTEGRITY_OS)
+extern PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+ IMG_HANDLE *phMemObj,
+ IMG_HANDLE hPriv);
+extern PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+ IMG_HANDLE hPriv);
+#endif
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which ones aren't, so we will just write 0 to invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
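+
+/*
+ * Usage sketch (an assumption, for illustration only): read the first
+ * 16 bytes of a PMR into a local buffer without mapping it.
+ *
+ *     IMG_UINT8 aui8Buf[16];
+ *     size_t uiNumBytes;
+ *
+ *     eError = PMR_ReadBytes(psPMR, 0, aui8Buf, sizeof(aui8Buf),
+ *                            &uiNumBytes);
+ */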
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number written in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which ones aren't, so we will just ignore data at invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/**************************************************************************/ /*!
+@Function PMRMMapPMR
+@Description Performs the necessary steps to map the PMR into a user process
+ address space. The caller does not need to call
+ PMRLockSysPhysAddresses before calling this function.
+
+@Input psPMR PMR to map.
+
+@Input pOSMMapData OS specific data needed to create a mapping.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+extern void
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+extern PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefUnlockPMR()
+ *
+ * Same as above but also unlocks the PMR.
+ */
+extern PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR);
+
+extern PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR);
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR. Thus this
+ * function is idempotent and acquire/release semantics are not
+ * required.
+ *
+ * Returns the flags as specified on the PMR. The flags are to be
+ * interpreted as mapping permissions
+ */
+extern PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsSparse(const PMR *psPMR);
+
+
+
+extern PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+extern PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR);
+
+extern PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR);
+
+extern IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR);
+/*
+ * PMR_IsOffsetValid()
+ *
+ * Reports (via pbValid) whether an address offset inside a PMR has
+ * a valid physical backing.
+ */
+extern PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_BOOL *pbValid);
+
+extern PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR);
+
+/*
+ * PMR_SysPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_SysPhysAddr may only be called after PMRLockSysPhysAddresses()
+ * has been called. The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR. It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If the caller only wants one physical address, it is sufficient to pass in:
+ * ui32Log2PageSize==0 and ui32NumOfPages==1
+ */
+extern PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEV_PHYADDR *psDevAddr,
+ IMG_BOOL *pbValid);
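+
+/*
+ * Minimal usage sketch (an assumption, for illustration only): fetch the
+ * device physical address of the start of a PMR, using the single-address
+ * convention described above (ui32Log2PageSize==0, ui32NumOfPages==1).
+ *
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     IMG_BOOL bValid;
+ *
+ *     eError = PMRLockSysPhysAddresses(psPMR);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         eError = PMR_DevPhysAddr(psPMR, 0, 1, 0, &sDevPAddr, &bValid);
+ *         ...
+ *         PMRUnlockSysPhysAddresses(psPMR);
+ *     }
+ */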
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR. It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+extern PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_CPU_PHYADDR *psCpuAddrPtr,
+ IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+ IMG_UINT64 *pui64UID);
+/*
+ * PMR_ChangeSparseMem()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags);
+
+/*
+ * PMR_ChangeSparseMemCPUMap()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in CPU space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices);
+
+#if defined(PDUMP)
+
+extern void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phPDumpAllocInfoPtr);
+
+extern void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle);
+
+extern void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut);
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC).
+ */
+extern PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NamespaceNameLen,
+ IMG_CHAR *pszNamespaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName
+ );
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * writes the current contents of the PMR memory to the pdump PRM
+ * stream, and emits some PDump code to the script stream to LDB said
+ * bytes from said file. If bZero is IMG_TRUE then the PDump zero page
+ * is used as the source for the LDB.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * emits some PDump that does an SAB (save bytes) using the PDump
+ * symbolic address of the PMR. Note that this is generally not the
+ * preferred way to dump the buffer contents. There is an equivalent
+ * function in devicemem_server.h which also emits SAB but using the
+ * virtual address, which is the "right" way to dump the buffer
+ * contents to a file. This function exists just to aid testing by
+ * providing a means to dump the PMR directly by symbolic address
+ * also.
+ */
+extern PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRMallocPMR)
+#endif
+static INLINE void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_BOOL bForcePersistent,
+ IMG_HANDLE *phPDumpAllocInfoPtr)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(puiMappingTable);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+ PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRFreePMR)
+#endif
+static INLINE void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRChangeSparsePMR)
+#endif
+static INLINE void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount);
+ PVR_UNREFERENCED_PARAMETER(pai32AllocIndices);
+ PVR_UNREFERENCED_PARAMETER(ui32FreePageCount);
+ PVR_UNREFERENCED_PARAMETER(pai32FreeIndices);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NamespaceNameLen,
+ IMG_CHAR *pszNamespaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+ PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+ PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+ PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+ PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue64)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui64Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ PVR_UNREFERENCED_PARAMETER(bZero);
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+ return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+/* This function returns the private data that a pmr subtype
+ squirrelled in here. We use the function table pointer as
+ "authorization" that this function is being called by the pmr
+ subtype implementation. We can assume (assert) that. It would be
+ a bug in the implementation of the pmr subtype if this assertion
+ ever fails. */
+extern void *
+PMRGetPrivateDataHack(const PMR *psPMR,
+ const PMR_IMPL_FUNCTAB *psFuncTab);
+
+extern PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+extern PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+ PMR *psPageListPMR,
+ IMG_DEVMEM_OFFSET_T uiTableOffset,
+ IMG_DEVMEM_SIZE_T uiTableLength,
+ /* Referenced PMR, and "page" granularity */
+ PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+ PMR_PAGELIST **ppsPageList);
+
+/* Doesn't actually erase the page list - just releases the appropriate refcounts */
+extern PVRSRV_ERROR /* FIXME: should be void */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiFlags);
+
+extern PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such)
+ *
+ * Not for general use. Only PVRSRVInit(); should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRInit(void);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and for debug checks
+ *
+ * Not for general use. Only PVRSRVDeInit(); should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRDeInit(void);
+
+#if defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+ void *hRIHandle);
+#endif
+
+#endif /* #ifdef _SRVSRV_PMR_H_ */
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This file is for definitions
+ that are private to the world of PMRs, but that need to be
+ shared between pmr.c itself and the modules that implement the
+ callbacks for the PMR.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_IMPL_H_
+#define _SRVSRV_PMR_IMPL_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _PMR_ PMR;
+/* stuff that per-flavour callbacks need to share with pmr.c */
+typedef void *PMR_IMPL_PRIVDATA;
+
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
+typedef void *PMR_MMAP_DATA;
+
+/**
+ * Which PMR factory has created this PMR?
+ */
+typedef enum _PMR_IMPL_TYPE_
+{
+ PMR_TYPE_NONE = 0,
+ PMR_TYPE_OSMEM,
+ PMR_TYPE_LMA,
+ PMR_TYPE_DMABUF,
+ PMR_TYPE_EXTMEM,
+ PMR_TYPE_DC,
+ PMR_TYPE_TDFWCODE,
+ PMR_TYPE_TDSECBUF
+} PMR_IMPL_TYPE;
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN
+
+@Description Called to lock down the physical addresses for all pages
+ allocated for a PMR.
+ The default implementation is to simply increment a
+ lock-count for debugging purposes.
+ If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will
+ be called when someone first requires a physical address,
+ and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be
+ called when the last such reference is released.
+ The PMR implementation may assume that physical addresses
+ will have been "locked" in this manner before any call is
+ made to the pfnDevPhysAddr() callback
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN
+
+@Description Called to release the lock taken on the physical addresses
+ for all pages allocated for a PMR.
+ The default implementation is to simply decrement a
+ lock-count for debugging purposes.
+ If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be
+ called when the last reference taken on the PMR is
+ released.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_DEV_PHYS_ADDR_FN
+
+@Description Called to obtain one or more physical addresses for given
+ offsets within a PMR.
+
+ The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is
+ guaranteed to have been called prior to calling the
+ PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to
+ rely on the physical address thus obtained after the
+ PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input ui32Log2PageSize The log2 page size.
+@Input ui32NumOfAddr The number of addresses to be
+ returned
+@Input puiOffset The offset from the start of the
+ PMR (in bytes) for which the
+ physical address is required.
+ Where multiple addresses are
+ requested, this will contain a
+ list of offsets.
+@Output pbValid List of boolean flags indicating
+ which addresses in the returned
+ list (psDevAddrPtr) are valid
+ (for sparse allocations, not all
+ pages may have a physical backing)
+@Output psDevAddrPtr Returned list of physical addresses
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevAddrPtr);
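+
+/*
+ * Minimal implementation sketch (an assumption, not taken from any factory
+ * in this patch): for a physically contiguous allocation whose hypothetical
+ * private data (MY_PMR_PRIV with member sBasePAddr) stores the base device
+ * physical address, the callback can simply offset from that base; the
+ * ui32Log2PageSize argument is not needed in this trivial case.
+ *
+ *     static PVRSRV_ERROR MyPmrDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+ *                                          IMG_UINT32 ui32Log2PageSize,
+ *                                          IMG_UINT32 ui32NumOfAddr,
+ *                                          IMG_DEVMEM_OFFSET_T *puiOffset,
+ *                                          IMG_BOOL *pbValid,
+ *                                          IMG_DEV_PHYADDR *psDevAddrPtr)
+ *     {
+ *         MY_PMR_PRIV *psPriv = pvPriv;
+ *         IMG_UINT32 i;
+ *
+ *         PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize);
+ *
+ *         for (i = 0; i < ui32NumOfAddr; i++)
+ *         {
+ *             psDevAddrPtr[i].uiAddr = psPriv->sBasePAddr.uiAddr + puiOffset[i];
+ *             pbValid[i] = IMG_TRUE;
+ *         }
+ *         return PVRSRV_OK;
+ *     }
+ */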
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+
+@Description Called to obtain a kernel-accessible address (mapped to a
+ virtual address if required) for the PMR for use internally
+ in Services.
+
+ Implementation of this function for the (default) PMR factory providing
+ OS-allocations is mandatory (the driver will expect to be able to call
+ this function for OS-provided allocations).
+ For other PMR factories, implementation of this function is only necessary
+ where an MMU mapping is required for the Kernel to be able to access the
+ allocated memory.
+ If no mapping is needed, this function can remain unimplemented and the
+ pfn may be set to NULL.
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which mapping is to
+ start
+@Input uiSize Size of mapping (in bytes)
+@Output ppvKernelAddressOut Mapped kernel address
+@Output phHandleOut Returned handle of the new mapping
+@Input ulFlags Mapping flags
+
+@Return PVRSRV_OK if the mapping was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags);
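+
+/*
+ * Minimal implementation sketch (an assumption, not taken from any factory
+ * in this patch): a factory that already holds a kernel virtual address for
+ * the whole allocation in its hypothetical private data (MY_PMR_PRIV with
+ * member pvKernBase) can simply return that address at the requested offset
+ * and use a NULL handle.
+ *
+ *     static PVRSRV_ERROR MyPmrAcquireKernelMappingData(PMR_IMPL_PRIVDATA pvPriv,
+ *                                                       size_t uiOffset,
+ *                                                       size_t uiSize,
+ *                                                       void **ppvKernelAddressOut,
+ *                                                       IMG_HANDLE *phHandleOut,
+ *                                                       PMR_FLAGS_T ulFlags)
+ *     {
+ *         MY_PMR_PRIV *psPriv = pvPriv;
+ *
+ *         PVR_UNREFERENCED_PARAMETER(uiSize);
+ *         PVR_UNREFERENCED_PARAMETER(ulFlags);
+ *
+ *         *ppvKernelAddressOut = (IMG_UINT8 *)psPriv->pvKernBase + uiOffset;
+ *         *phHandleOut = NULL;
+ *         return PVRSRV_OK;
+ *     }
+ */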
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN
+
+@Description Called to release a mapped kernel virtual address
+
+ Implementation of this callback is mandatory if PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ is provided for the PMR factory, otherwise this function can remain unimplemented
+ and the pfn may be set to NULL.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input hHandle Handle of the mapping to be
+ released
+
+@Return None
+*/
+/*****************************************************************************/
+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_READ_BYTES_FN
+
+@Description Called to read bytes from an unmapped allocation
+
+ Implementation of this callback is optional -
+ where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ to map the entire PMR (if an MMU mapping is required for the Kernel to be
+ able to access the allocated memory).
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which to begin
+ reading
+@Output pcBuffer Buffer in which to return the
+ read data
+@Input uiBufSz Number of bytes to be read
+@Output puiNumBytes Number of bytes actually read
+ (may be less than uiBufSz)
+
+@Return PVRSRV_OK if the read was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_WRITE_BYTES_FN
+
+@Description Called to write bytes into an unmapped allocation
+
+ Implementation of this callback is optional -
+ where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ to map the entire PMR (if an MMU mapping is required for the Kernel to be
+ able to access the allocated memory).
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which to begin
+ writing
+@Input pcBuffer Buffer containing the data to be
+ written
+@Input uiBufSz Number of bytes to be written
+@Output puiNumBytes Number of bytes actually written
+ (may be less than uiBufSz)
+
+@Return PVRSRV_OK if the write was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_UNPIN_MEM_FN
+
+@Description Called to unpin an allocation.
+ Once unpinned, the pages backing the allocation may be
+ re-used by the Operating System for another purpose.
+ When the pages are required again, they may be re-pinned
+ (by calling PFN_PIN_MEM_FN). The driver will try to return
+ same pages as before. The caller will be told if the
+ content of these returned pages has been modified or if
+ the pages returned are not the original pages.
+
+ Implementation of this callback is optional.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the unpin was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_PIN_MEM_FN
+
+@Description Called to pin a previously unpinned allocation.
+ The driver will try to return same pages as were previously
+ assigned to the allocation. The caller will be told if the
+ content of these returned pages has been modified or if
+ the pages returned are not the original pages.
+
+ Implementation of this callback is optional.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Input psMappingTable Mapping table, which describes how
+ virtual 'chunks' are to be mapped to
+ physical 'chunks' for the allocation.
+
+@Return PVRSRV_OK if the original pages were returned unmodified.
+ PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
+ or different pages were returned.
+ Another PVRSRV_ERROR code on failure.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_MAPPING_TABLE *psMappingTable);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN
+
+@Description Called to modify the physical backing for a given sparse
+ allocation.
+ The caller provides a list of the pages within the sparse
+ allocation which should be backed with a physical allocation
+ and a list of the pages which do not require backing.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the sparse allocation
+ to be modified
+@Input ui32AllocPageCount The number of pages specified in
+ pai32AllocIndices
+@Input pai32AllocIndices The list of pages in the sparse
+ allocation that should be backed
+ with a physical allocation. Pages
+ are referenced by their index
+ within the sparse allocation
+ (e.g. in a 10 page allocation, pages
+ are denoted by indices 0 to 9)
+@Input ui32FreePageCount The number of pages specified in
+ pai32FreeIndices
+@Input pai32FreeIndices The list of pages in the sparse
+ allocation that do not require
+ a physical allocation.
+@Input ui32Flags Allocation flags
+
+@Return PVRSRV_OK if the sparse allocation physical backing was updated
+ successfully, an error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32Flags);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN
+
+@Description Called to modify which pages are mapped for a given sparse
+ allocation.
+ The caller provides a list of the pages within the sparse
+ allocation which should be given a CPU mapping and a list
+ of the pages which do not require a CPU mapping.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the sparse allocation
+ to be modified
+@Input sCpuVAddrBase The virtual base address of the
+ sparse allocation
+@Input ui32AllocPageCount The number of pages specified in
+ pai32AllocIndices
+@Input pai32AllocIndices The list of pages in the sparse
+ allocation that should be given
+ a CPU mapping. Pages are referenced
+ by their index within the sparse
+ allocation (e.g. in a 10 page
+ allocation, pages are denoted by
+ indices 0 to 9)
+@Input ui32FreePageCount The number of pages specified in
+ pai32FreeIndices
+@Input pai32FreeIndices The list of pages in the sparse
+ allocation that do not require a CPU
+ mapping.
+
+@Return PVRSRV_OK if the page mappings were updated successfully, an
+ error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_MMAP_FN
+
+@Description Called to map pages in the specified PMR.
+
+ Implementation of this callback is optional.
+ Where it is provided, it will be used in place of OSMMapPMRGeneric().
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the allocation to be
+ mapped
+@Input pMMapData OS-specific data to describe how
+ mapping should be performed
+
+@Return PVRSRV_OK if the mapping was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ PMR *psPMR,
+ PMR_MMAP_DATA pMMapData);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_FINALIZE_FN
+
+@Description Called to destroy the PMR.
+ This callback will be called only when all references to
+ the PMR have been dropped.
+ The PMR was created via a call to PhysmemNewRamBackedPMR()
+ and is destroyed via this callback.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the PMR destruction was successful, an error
+ code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+typedef struct _PMR_IMPL_FUNCTAB_ {
+ PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses;
+ PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses;
+
+ PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr;
+
+ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData;
+ PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData;
+
+#if defined (INTEGRITY_OS)
+ /*
+ * MapMemoryObject()/UnmapMemoryObject()
+ *
+ * called to map/unmap memory objects in Integrity OS
+ */
+
+ PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE *phMemObj);
+ PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv);
+
+#if defined(USING_HYPERVISOR)
+ IMG_HANDLE (*pfnGetPmr)(PMR_IMPL_PRIVDATA pvPriv, size_t ulOffset);
+#endif
+#endif
+
+ PFN_READ_BYTES_FN pfnReadBytes;
+ PFN_WRITE_BYTES_FN pfnWriteBytes;
+
+ PFN_UNPIN_MEM_FN pfnUnpinMem;
+ PFN_PIN_MEM_FN pfnPinMem;
+
+ PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
+ PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+
+ PFN_MMAP_FN pfnMMap;
+
+ PFN_FINALIZE_FN pfnFinalize;
+} PMR_IMPL_FUNCTAB;
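+
+/*
+ * Illustrative sketch only (not part of this driver): a PMR factory fills in
+ * a PMR_IMPL_FUNCTAB with its callbacks and hands it to the PMR creation
+ * routine. The "Foo" factory and callback names below are hypothetical;
+ * callbacks documented above as optional may simply be left NULL.
+ *
+ *   static const PMR_IMPL_FUNCTAB _sFooPMRFuncTab = {
+ *       .pfnLockPhysAddresses     = _FooLockPhysAddresses,
+ *       .pfnUnlockPhysAddresses   = _FooUnlockPhysAddresses,
+ *       .pfnDevPhysAddr           = _FooDevPhysAddr,
+ *       .pfnChangeSparseMem       = _FooChangeSparseMem,
+ *       .pfnChangeSparseMemCPUMap = _FooChangeSparseMemCPUMap,
+ *       .pfnFinalize              = _FooFinalize,
+ *   };
+ */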
+
+
+#endif /* of #ifndef _SRVSRV_PMR_IMPL_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linux OS PMR functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#endif
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pmr.h"
+#include "pmr_os.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * x86_32:
+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
+ * pages with default memory attributes; these HIGHMEM pages are skipped in
+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
+ * Also vm_insert_page is faster.
+ *
+ * x86_64:
+ * Use vm_insert_page because it is faster.
+ *
+ * Other platforms:
+ * Use remap_pfn_range by default because it does not issue a cache flush.
+ * It is known that ARM32 benefits from this. When other platforms become
+ * available it has to be investigated if this assumption holds for them as well.
+ *
+ * Since vm_insert_page does more precise memory accounting we have the build
+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
+ * feature.
+ *
+ */
+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
+#define PMR_OS_USE_VM_INSERT_PAGE 1
+#endif
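+
+/*
+ * Illustrative note: PVR_MMAP_USE_VM_INSERT is expected to come from the
+ * build system rather than from this file. Assuming the kbuild conventions
+ * used elsewhere in this driver, a debug build could force the
+ * vm_insert_page() path on any platform with something like
+ *
+ *   ccflags-y += -DPVR_MMAP_USE_VM_INSERT
+ *
+ * in the relevant Makefile.
+ */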
+
+static void MMapPMROpen(struct vm_area_struct *ps_vma)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+
+ /* Our VM flags should ensure this function never gets called */
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Unexpected mmap open call, this is probably an application bug.",
+ __func__));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
+ __func__,
+ ps_vma,
+ ps_vma->vm_start,
+ ps_vma->vm_end - ps_vma->vm_start,
+ psPMR));
+
+ /* In case we get called anyway let's do things right by increasing the refcount and
+ * locking down the physical addresses. */
+ PMRRefPMR(psPMR);
+
+ if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
+ PMRUnrefPMR(psPMR);
+ }
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ uintptr_t vAddr = ps_vma->vm_start;
+
+ while (vAddr < ps_vma->vm_end)
+ {
+ /* USER MAPPING */
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
+ vAddr += PAGE_SIZE;
+ }
+ }
+#else
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, ps_vma->vm_end - ps_vma->vm_start);
+#endif
+#endif
+
+ PMRUnlockSysPhysAddresses(psPMR);
+ PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+ unsigned long ulOffset = addr - ps_vma->vm_start;
+ size_t uiBytesCopied;
+ PVRSRV_ERROR eError;
+ int iRetVal = -EINVAL;
+
+ if (write)
+ {
+ eError = PMR_WriteBytes(psPMR,
+ (IMG_DEVMEM_OFFSET_T) ulOffset,
+ buf,
+ len,
+ &uiBytesCopied);
+ }
+ else
+ {
+ eError = PMR_ReadBytes(psPMR,
+ (IMG_DEVMEM_OFFSET_T) ulOffset,
+ buf,
+ len,
+ &uiBytesCopied);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+ __func__,
+ write ? "PMR_WriteBytes" : "PMR_ReadBytes",
+ eError));
+ }
+ else
+ {
+ iRetVal = uiBytesCopied;
+ }
+
+ return iRetVal;
+}
+
+static const struct vm_operations_struct gsMMapOps =
+{
+ .open = &MMapPMROpen,
+ .close = &MMapPMRClose,
+ .access = MMapVAccess,
+};
+
+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ struct vm_area_struct *ps_vma,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bUseVMInsertPage,
+ IMG_BOOL bUseMixedMap)
+{
+ IMG_INT32 iStatus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ unsigned long uiPFN;
+#endif
+
+#if defined(CONFIG_L4)
+ IMG_CPU_VIRTADDR pvCpuVAddr;
+
+ /* Use L4LINUX function, removes per-arch code-path */
+ pvCpuVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr);
+ if (pvCpuVAddr == NULL)
+ {
+ return -1;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t((uintptr_t)pvCpuVAddr, 0);
+#else
+ uiPFN = ((uintptr_t) pvCpuVAddr) >> PAGE_SHIFT;
+#endif
+#else /* defined(CONFIG_L4) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
+#else
+ uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
+#endif
+#endif
+
+ /*
+ * vm_insert_page() allows insertion of individual pages into user
+ * VMA space _only_ if page is a order-zero allocated page
+ */
+ if (bUseVMInsertPage)
+ {
+ if (bUseMixedMap)
+ {
+ /*
+ * This path is just for debugging. It should be
+ * equivalent to the remap_pfn_range() path.
+ */
+ iStatus = vm_insert_mixed(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN);
+#else
+ uiPFN);
+#endif
+ }
+ else
+ {
+ /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+ iStatus = vm_insert_page(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t_to_page(sPFN));
+#else
+ pfn_to_page(uiPFN));
+#endif
+ }
+ }
+ else
+ {
+ /*
+ NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
+
+ The current services mmap model maps in a PMR's full-length size
+ into the user VMA & applies any user specified offset to the kernel
+ returned zero-offset based VA in services client; this essentially
+ means services server ignores ps_vma->vm_pgoff (this houses hPMR)
+ during a mmap call.
+
+ Furthermore, during a DMA/CMA memory allocation, multiple order-n
+ pages are used to satisfy an allocation request due to DMA/CMA
+ framework rounding-up allocation size to next power-of-two which
+ can lead to wasted memory (so we don't allocate using single call).
+
+ The combination of the above two issues mean that we cannot use the
+ dma_mmap_coherent() for a number of reasons outlined below:
+
+ - Services mmap semantics does not fit with dma_mmap_coherent()
+ which requires proper ps_vma->vm_pgoff; seeing this houses a
+ hPMR handle value, calls into dma_mmap_coherent() fails. This
+ could be avoided by forcing ps_vma->vm_pgoff to zero but the
+ ps_vma->vm_pgoff is applied to DMA bus address PFN and not
+ user VMA which is always mapped at ps_vma->vm_start.
+
+ - As multiple order-n pages are used for DMA/CMA allocations, a
+ single dma_mmap_coherent() call with a vma->vm_pgoff set to
+ zero cannot (maybe) be used because there is no guarantee that
+ all of the multiple order-n pages in the PMR are physically
+ contiguous from the first entry to the last. Whilst this is
+ highly likely to be the case, there is no guarantee that it
+ will be so we cannot depend on this being the case.
+
+ The solution is to manually mmap DMA/CMA pages into user VMA
+ using remap_pfn_range() directly. Furthermore, accounting is
+ always compromised for DMA/CMA allocations.
+ */
+ size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
+
+ iStatus = remap_pfn_range(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t_to_pfn(sPFN),
+#else
+ uiPFN,
+#endif
+ uiNumContiguousBytes,
+ ps_vma->vm_page_prot);
+ }
+
+ return iStatus;
+}
+
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+ struct vm_area_struct *ps_vma = pOSMMapData;
+ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+ PVRSRV_ERROR eError;
+ size_t uiLength;
+ IMG_INT32 iStatus;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32CPUCacheFlags;
+ pgprot_t sPageProt;
+ IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_UINT32 uiOffsetIdx;
+ IMG_UINT32 uiNumOfPFNs;
+ IMG_UINT32 uiLog2PageSize;
+ IMG_CPU_PHYADDR *psCpuPAddr;
+ IMG_BOOL *pbValid;
+ IMG_BOOL bUseMixedMap = IMG_FALSE;
+ IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+ ((ps_vma->vm_flags & VM_SHARED) == 0))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+
+ sPageProt = vm_get_page_prot(ps_vma->vm_flags);
+
+ ui32CPUCacheFlags = DevmemCPUCacheMode(psDevNode, PMR_Flags(psPMR));
+ switch (ui32CPUCacheFlags)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ sPageProt = pgprot_noncached(sPageProt);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ sPageProt = pgprot_writecombine(sPageProt);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ {
+/* Do not set to write-combine for plato */
+#if !defined(PLATO_MEMORY_CONFIG)
+ PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
+
+ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
+ sPageProt = pgprot_writecombine(sPageProt);
+#endif
+ break;
+ }
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+ ps_vma->vm_page_prot = sPageProt;
+
+ ps_vma->vm_flags |= VM_IO;
+
+ /* Don't include the mapping in core dumps */
+ ps_vma->vm_flags |= VM_DONTDUMP;
+
+ /*
+ * Disable mremap because our nopage handler assumes all
+ * page requests have already been validated.
+ */
+ ps_vma->vm_flags |= VM_DONTEXPAND;
+
+ /* Don't allow mapping to be inherited across a process fork */
+ ps_vma->vm_flags |= VM_DONTCOPY;
+
+ uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+ /* Is this mmap targeting non order-zero pages or does it use pfn mappings?
+ * If yes, don't use vm_insert_page */
+ uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+#if defined(PMR_OS_USE_VM_INSERT_PAGE)
+ bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
+#endif
+
+ /* Can we use stack allocations */
+ uiNumOfPFNs = uiLength >> uiLog2PageSize;
+ if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
+ if (psCpuPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ /* If this second allocation fails, free psCpuPAddr before exiting */
+ pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
+ if (pbValid == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psCpuPAddr);
+ goto e1;
+ }
+ }
+ else
+ {
+ psCpuPAddr = asCpuPAddr;
+ pbValid = abValid;
+ }
+
+ /* Obtain map range pfns */
+ eError = PMR_CpuPhysAddr(psPMR,
+ uiLog2PageSize,
+ uiNumOfPFNs,
+ 0,
+ psCpuPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+
+ /*
+ * Scan the map range for pfns without struct page* handling. If
+ * we find one, this is a mixed map, and we can't use vm_insert_page()
+ * NOTE: vm_insert_page() allows insertion of individual pages into user
+ * VMA space _only_ if said page is an order-zero allocated page.
+ */
+ if (bUseVMInsertPage)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ unsigned long uiPFN;
+#endif
+
+ for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+ {
+ if (pbValid[uiOffsetIdx])
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
+
+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+ uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+ if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ {
+ bUseMixedMap = IMG_TRUE;
+ break;
+ }
+ }
+ }
+
+ if (bUseMixedMap)
+ {
+ ps_vma->vm_flags |= VM_MIXEDMAP;
+ }
+ }
+ else
+ {
+ ps_vma->vm_flags |= VM_PFNMAP;
+ }
+
+ /* For each PMR page-size contiguous bytes, map page(s) into user VMA */
+ for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
+ {
+ uiOffsetIdx = uiOffset >> uiLog2PageSize;
+ /*
+ * Only map in pages that are valid, any that aren't will be
+ * picked up by the nopage handler which will return a zeroed
+ * page for us.
+ */
+ if (pbValid[uiOffsetIdx])
+ {
+ iStatus = _OSMMapPMR(psDevNode,
+ ps_vma,
+ uiOffset,
+ &psCpuPAddr[uiOffsetIdx],
+ uiLog2PageSize,
+ bUseVMInsertPage,
+ bUseMixedMap);
+ if (iStatus)
+ {
+ /* Failure error code doesn't get propagated */
+ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+ PVR_ASSERT(0);
+ goto e3;
+ }
+ }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+ (void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
+ psCpuPAddr[uiOffsetIdx],
+ 1<<uiLog2PageSize,
+ NULL);
+#endif
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE);
+#endif
+
+ if (psCpuPAddr != asCpuPAddr)
+ {
+ OSFreeMem(psCpuPAddr);
+ OSFreeMem(pbValid);
+ }
+
+ /* Remember the PMR so it can be unlocked and unreferenced when the mapping is closed */
+ ps_vma->vm_private_data = psPMR;
+
+ /* Install open and close handlers for ref-counting */
+ ps_vma->vm_ops = &gsMMapOps;
+
+ /*
+ * Take a reference on the PMR so that it can't be freed while mapped
+ * into the user process.
+ */
+ PMRRefPMR(psPMR);
+
+ return PVRSRV_OK;
+
+ /* Error exit paths follow */
+ e3:
+ if (psCpuPAddr != asCpuPAddr)
+ {
+ OSFreeMem(psCpuPAddr);
+ OSFreeMem(pbValid);
+ }
+ e1:
+ PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error. Abort!"));
+ PMRUnlockSysPhysAddresses(psPMR);
+ e0:
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title OS PMR functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS specific PMR functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PMR_OS_H__)
+#define __PMR_OS_H__
+
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function OSMMapPMRGeneric
+@Description Implements a generic PMR mapping function, which is used
+ to CPU map a PMR where the PMR does not have a mapping
+ function defined by the creating PMR factory.
+@Input psPMR the PMR to be mapped
+@Input pOSMMapData OS-specific data describing the
+ mapping to be performed
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
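+
+/*
+ * Usage note (illustrative; the "Foo" names are hypothetical): a PMR factory
+ * with no special CPU-mapping requirements leaves pfnMMap NULL in its
+ * PMR_IMPL_FUNCTAB, in which case this generic implementation is used. A
+ * factory that only needs to add its own bookkeeping can instead supply a
+ * thin wrapper that delegates to it:
+ *
+ *   static PVRSRV_ERROR _FooPMRMMap(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR,
+ *                                   PMR_MMAP_DATA pOSMMapData)
+ *   {
+ *       ... factory-specific bookkeeping here ...
+ *       return OSMMapPMRGeneric(psPMR, pOSMMapData);
+ *   }
+ */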
+
+#endif /* !defined(__PMR_OS_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File power.c
+@Title Power management functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for power management functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "rk_init.h"
+
+#include "lists.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+struct _PVRSRV_POWER_DEV_TAG_
+{
+ PFN_PRE_POWER pfnDevicePrePower;
+ PFN_POST_POWER pfnDevicePostPower;
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower;
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower;
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest;
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest;
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest;
+ IMG_HANDLE hSysData;
+ IMG_HANDLE hDevCookie;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
+};
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+static IMG_BOOL gbInitServerRunning = IMG_FALSE;
+static IMG_BOOL gbInitServerRan = IMG_FALSE;
+static IMG_BOOL gbInitSuccessful = IMG_FALSE;
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetInitServerState
+
+ @Description Sets the given services init state flag.
+
+ @Input eInitServerState : the services init state to set
+ @Input bState : the value to set it to
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
+{
+
+ switch (eInitServerState)
+ {
+ case PVRSRV_INIT_SERVER_RUNNING:
+ gbInitServerRunning = bState;
+ break;
+ case PVRSRV_INIT_SERVER_RAN:
+ gbInitServerRan = bState;
+ break;
+ case PVRSRV_INIT_SERVER_SUCCESSFUL:
+ gbInitSuccessful = bState;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unknown state %x", __func__, eInitServerState));
+ return PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetInitServerState
+
+ @Description Returns the current value of the given services init state flag.
+
+ @Input eInitServerState : a services init state
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_EXPORT
+IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
+{
+ IMG_BOOL bReturnVal;
+
+ switch (eInitServerState)
+ {
+ case PVRSRV_INIT_SERVER_RUNNING:
+ bReturnVal = gbInitServerRunning;
+ break;
+ case PVRSRV_INIT_SERVER_RAN:
+ bReturnVal = gbInitServerRan;
+ break;
+ case PVRSRV_INIT_SERVER_SUCCESSFUL:
+ bReturnVal = gbInitSuccessful;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unknown state %x", __func__, eInitServerState));
+ bReturnVal = IMG_FALSE;
+ }
+
+ return bReturnVal;
+}
+#endif /* !defined(SUPPORT_KERNEL_SRVINIT) */
+
+/*!
+******************************************************************************
+
+ @Function _IsSystemStatePowered
+
+ @Description Tests whether a given system state represents powered-up.
+
+ @Input eSystemPowerState : a system power state
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+ return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPowerLock
+
+ @Description Obtain the mutex for power transitions. Only allowed when
+ system power is on.
+
+ @Return PVRSRV_ERROR_RETRY or PVRSRV_OK
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPowerLock(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ OSLockAcquire(psDeviceNode->hPowerLock);
+
+ /* Only allow the power lock to be taken when the system power is on */
+ if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+ {
+ return PVRSRV_OK;
+ }
+
+ OSLockRelease(psDeviceNode->hPowerLock);
+
+ return PVRSRV_ERROR_RETRY;
+}
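+
+/*
+ * Typical caller pattern (illustrative sketch): the power lock must be held
+ * around a device power transition, and PVRSRV_ERROR_RETRY indicates the
+ * system is powered down and the caller should simply retry later:
+ *
+ *   eError = PVRSRVPowerLock(psDeviceNode);
+ *   if (eError != PVRSRV_OK)
+ *       return eError;
+ *
+ *   eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ *                                        PVRSRV_DEV_POWER_STATE_ON,
+ *                                        IMG_FALSE);
+ *   PVRSRVPowerUnlock(psDeviceNode);
+ */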
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVForcedPowerLock
+
+ @Description Obtain the mutex for power transitions regardless of
+ system power state
+
+ @Return void
+
+******************************************************************************/
+IMG_EXPORT
+void PVRSRVForcedPowerLock(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ OSLockAcquire(psDeviceNode->hPowerLock);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPowerUnlock
+
+ @Description Release the mutex for power transitions
+
+ @Return void
+
+******************************************************************************/
+IMG_EXPORT
+void PVRSRVPowerUnlock(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ OSLockRelease(psDeviceNode->hPowerLock);
+}
+IMG_EXPORT
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+ return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetDeviceDefaultPowerState
+
+ @Description Set the default device power state to eNewPowerState
+
+ @Input psDeviceNode : Device node
+ @Input eNewPowerState : New power state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ psPowerDevice->eDefaultPowerState = eNewPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceIdleRequestKM
+
+ @Description
+
+ Perform device-specific processing required to force the device idle.
+
+ @Input psDeviceNode : Device node
+ @Input pfnIsDefaultStateOff : Filter function used to determine whether a forced idle is required for the device
+ @Input bDeviceOffPermitted : IMG_TRUE if the request should not fail if the device is off,
+ IMG_FALSE if it should fail when the device is off
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff,
+ IMG_BOOL bDeviceOffPermitted)
+{
+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+ if (psPowerDev && psPowerDev->pfnForcedIdleRequest)
+ {
+ if (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))
+ {
+ return psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie,
+ bDeviceOffPermitted);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceIdleCancelRequestKM
+
+ @Description
+
+ Perform device-specific processing required to cancel the forced idle state on the device, returning to normal operation.
+
+ @Input psDeviceNode : Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+ if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest)
+ {
+ return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie);
+ }
+
+ return PVRSRV_OK;
+}
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input psPowerDevice : Power device
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ IMG_UINT64 ui64SysTimer1 = 0;
+ IMG_UINT64 ui64SysTimer2 = 0;
+ IMG_UINT64 ui64DevTimer1 = 0;
+ IMG_UINT64 ui64DevTimer2 = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ if (psPowerDevice->pfnDevicePrePower != NULL)
+ {
+ /* Call the device's power callback. */
+ ui64DevTimer1 = OSClockns64();
+ eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+ ui64DevTimer2 = OSClockns64();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Do any required system-layer processing. */
+ if (psPowerDevice->pfnSystemPrePower != NULL)
+ {
+ ui64SysTimer1 = OSClockns64();
+ eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+ ui64SysTimer2 = OSClockns64();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ ui64DevTimer1, ui64DevTimer2,
+ bForced,
+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+ IMG_TRUE);
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input psPowerDevice : Power device
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ IMG_UINT64 ui64SysTimer1 = 0;
+ IMG_UINT64 ui64SysTimer2 = 0;
+ IMG_UINT64 ui64DevTimer1 = 0;
+ IMG_UINT64 ui64DevTimer2 = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ /* Do any required system-layer processing. */
+ if (psPowerDevice->pfnSystemPostPower != NULL)
+ {
+ ui64SysTimer1 = OSClockns64();
+ eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+ ui64SysTimer2 = OSClockns64();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (psPowerDevice->pfnDevicePostPower != NULL)
+ {
+ /* Call the device's power callback. */
+ ui64DevTimer1 = OSClockns64();
+ eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+ ui64DevTimer2 = OSClockns64();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ ui64DevTimer1, ui64DevTimer2,
+ bForced,
+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+#endif
+ psPowerDevice->eCurrentPowerState = eNewPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetDevicePowerStateKM
+
+ @Description Set the Device into a new state
+
+ @Input psDeviceNode : Device node
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (!psPowerDevice)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+ {
+ eNewPowerState = psPowerDevice->eDefaultPowerState;
+ }
+
+ if (psPowerDevice->eCurrentPowerState != eNewPowerState)
+ {
+ eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
+ eNewPowerState,
+ bForced);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ eError = PVRSRVDevicePostPowerStateKM(psPowerDevice,
+ eNewPowerState,
+ bForced);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ /* Signal Device Watchdog Thread about power mode change. */
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+
+ if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+ {
+ if (psPVRSRVData->hDevicesWatchdogEvObj)
+ {
+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Transition to %d was denied, Forced=%d",
+ __func__, eNewPowerState, bForced));
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Transition to %d FAILED (%s)",
+ __func__, eNewPowerState, PVRSRVGetErrorStringKM(eError)));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVSetDeviceSystemPowerState
+@Description Set the device into a new power state based on the systems power
+ state
+@Input psDeviceNode Device node
+@Input eNewSysPowerState New system power state
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise
+*/ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT uiStage = 0;
+
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
+ _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+ /* require a proper power state */
+ if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Prevent simultaneous SetPowerStateKM calls */
+ PVRSRVForcedPowerLock(psDeviceNode);
+
+ /* no power transition requested, so do nothing */
+ if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return PVRSRV_OK;
+ }
+
+ if ((eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_OFF) ||
+ (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT))
+ {
+ /* If setting devices to default state, selectively force idle all devices whose default state is off */
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+ (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+ pfnIsDefaultStateOff, IMG_TRUE);
+
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ PVRSRVForcedPowerLock(psDeviceNode);
+ }
+ else
+ {
+ uiStage++;
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+ }
+
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ uiStage++;
+ goto ErrorExit;
+ }
+
+ psDeviceNode->eCurrentSysPowerState = eNewSysPowerState;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return PVRSRV_OK;
+
+ErrorExit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Transition from %d to %d FAILED (%s) at stage %d. Dumping debug info.",
+ __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
+ PVRSRVGetErrorStringKM(eError), uiStage));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PFN_PRE_POWER pfnDevicePrePower,
+ PFN_POST_POWER pfnDevicePostPower,
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower,
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ PVR_ASSERT(!psDeviceNode->psPowerDev);
+
+ PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+ PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+ if (psPowerDevice == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to alloc PVRSRV_POWER_DEV", __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* setup device for power manager */
+ psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+ psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+ psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+ psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+ psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+ psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+ psPowerDevice->pfnDustCountRequest = pfnDustCountRequest;
+ psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
+ psPowerDevice->hDevCookie = hDevCookie;
+ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+ psDeviceNode->psPowerDev = psPowerDevice;
+
+ return PVRSRV_OK;
+}
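+
+/*
+ * Registration example (illustrative only; the Foo* callbacks and psDevInfo
+ * are hypothetical names). Callbacks a device does not need may be passed as
+ * NULL, with the exception of the clock speed change callbacks, which are
+ * invoked unconditionally by PVRSRVDevicePre/PostClockSpeedChange below:
+ *
+ *   eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+ *                                      FooPrePowerState, FooPostPowerState,
+ *                                      NULL, NULL,
+ *                                      FooPreClockSpeedChange,
+ *                                      FooPostClockSpeedChange,
+ *                                      NULL, NULL, NULL,
+ *                                      (IMG_HANDLE)psDevInfo,
+ *                                      PVRSRV_DEV_POWER_STATE_OFF,
+ *                                      PVRSRV_DEV_POWER_STATE_OFF);
+ */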
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes the device's power management registration and frees its power device structure
+
+ @Input psDeviceNode : Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if (psDeviceNode->psPowerDev)
+ {
+ OSFreeMem(psDeviceNode->psPowerDev);
+ psDeviceNode->psPowerDev = NULL;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetDevicePowerState
+
+ @Description
+
+ Return the device power state
+
+ @Input psDeviceNode : Device node
+ @Output pePowerState : Current power state
+
+ @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise.
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ }
+
+ *pePowerState = psPowerDevice->eCurrentPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVIsDevicePowered
+
+ @Description
+
+ Whether the device is powered, for the purposes of lockup detection.
+
+ @Input psDeviceNode : Device node
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_EXPORT
+IMG_BOOL PVRSRVIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ if (OSLockIsLocked(psDeviceNode->hPowerLock))
+ {
+ return IMG_FALSE;
+ }
+
+ if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK)
+ {
+ return IMG_FALSE;
+ }
+
+ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePreClockSpeedChange
+
+ @Description
+
+ Notification from system layer that a device clock speed change is about to happen.
+
+ @Input psDeviceNode : Device node
+ @Input bIdleDevice : whether the device should be idled
+ @Input pvInfo : opaque platform data (currently unused)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POWER_DEV *psPowerDevice;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ ui64StartTimer = OSClockus();
+
+ /* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ {
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ { /* We can change the clock speed if the device is either IDLE or OFF */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError2)));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ }
+
+ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+ psPowerDevice->eCurrentPowerState);
+ }
+
+ ui64StopTimer = OSClockus();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+#endif
+
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePostClockSpeedChange
+
+ @Description
+
+ Notification from system layer that a device clock speed change has just happened.
+
+ @Input psDeviceNode : Device node
+ @Input bIdleDevice : whether the device had been idled
+ @Input pvInfo : opaque platform data (currently unused)
+
+ @Return void
+
+******************************************************************************/
+void PVRSRVDevicePostClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_POWER_DEV *psPowerDevice;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ ui64StartTimer = OSClockus();
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ }
+
+ if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ {
+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to cancel forced IDLE.", __func__));
+ }
+ }
+ }
+
+ /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ ui64StopTimer = OSClockus();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceDustCountChange
+
+ @Description
+
+ Request from the system layer to change the device's dust count.
+
+ @Input psDeviceNode : Device node
+ @Input ui32DustCount : dust count to be set
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DustCount)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ PVRSRV_DEV_POWER_STATE eDevicePowerState;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ eDevicePowerState = psPowerDevice->eCurrentPowerState;
+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ /* Device must be idle to change dust count */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError2)));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: error occurred whilst forcing idle (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: timeout occurred attempting to force idle (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ }
+
+ if (psPowerDevice->pfnDustCountRequest != NULL)
+ {
+ PVRSRV_ERROR eError2 = psPowerDevice->pfnDustCountRequest(psPowerDevice->hDevCookie, ui32DustCount);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+ __func__, psDeviceNode,
+ PVRSRVGetErrorStringKM(eError2)));
+ }
+ }
+
+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to cancel forced IDLE.", __func__));
+ goto ErrorExit;
+ }
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ }
+
+ return eError;
+
+ErrorExit:
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+}
+
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Power Management Functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for power management functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_NODE;
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+typedef enum _PVRSRV_INIT_SERVER_STATE_
+{
+ PVRSRV_INIT_SERVER_Unspecified = -1,
+ PVRSRV_INIT_SERVER_RUNNING = 0,
+ PVRSRV_INIT_SERVER_RAN = 1,
+ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
+ PVRSRV_INIT_SERVER_NUM = 3,
+ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
+} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
+
+IMG_IMPORT IMG_BOOL
+PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState,
+ IMG_BOOL bState);
+#endif /* !defined(SUPPORT_KERNEL_SRVINIT) */
+
+
+/*!
+ *****************************************************************************
+ * Power management
+ *****************************************************************************/
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVPowerLock(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_IMPORT void PVRSRVForcedPowerLock(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_IMPORT void PVRSRVPowerUnlock(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_SYS_POWER_STATE ePVRState);
+
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState);
+
+/* Type PFN_DC_REGISTER_POWER */
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PFN_PRE_POWER pfnDevicePrePower,
+ PFN_POST_POWER pfnDevicePostPower,
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower,
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PPVRSRV_DEV_POWER_STATE pePowerState);
+
+IMG_IMPORT
+IMG_BOOL PVRSRVIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo);
+
+IMG_IMPORT
+void PVRSRVDevicePostClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnCheckIdleReq,
+ IMG_BOOL bDeviceOffPermitted);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DustCount);
+
+
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Linux private data structure
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INCLUDED_PRIVATE_DATA_H_)
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile);
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection);
+
+#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Process based statistics
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Manages a collection of statistics based around a process
+ and referenced via OS agnostic methods.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+
+/*
+ * Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES (10)
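+/* Dead-list entries beyond this limit are trimmed by _CompressMemoryUsage()
+ * (called on process deregistration), which discards the oldest entries first. */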
+
+/*
+ * Definition of all process based statistics and the strings used to
+ * format them.
+ */
+typedef enum
+{
+ /* Stats that are per process... */
+ PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS,
+ PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS,
+
+ PVRSRV_PROCESS_STAT_TYPE_RC_OOMS,
+ PVRSRV_PROCESS_STAT_TYPE_RC_PRS,
+ PVRSRV_PROCESS_STAT_TYPE_RC_GROWS,
+ PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS,
+ PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES,
+ PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES,
+ PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES,
+ PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES,
+ PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP,
+ PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW,
+ PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP,
+ PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW,
+ PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT,
+ PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES,
+ PVRSRV_PROCESS_STAT_TYPE_KMALLOC,
+ PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_VMALLOC,
+ PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA,
+ PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA,
+ PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES,
+ PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX,
+ PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES,
+ PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX,
+
+ /* Must be the last enum...*/
+ PVRSRV_PROCESS_STAT_TYPE_COUNT
+} PVRSRV_PROCESS_STAT_TYPE;
+
+static const IMG_CHAR *const pszProcessStatFmt[PVRSRV_PROCESS_STAT_TYPE_COUNT] = {
+ "Connections %10d\n", /* PVRSRV_STAT_TYPE_CONNECTIONS */
+ "ConnectionsMax %10d\n", /* PVRSRV_STAT_TYPE_MAXCONNECTIONS */
+
+ "RenderContextOutOfMemoryEvents %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_OOMS */
+ "RenderContextPartialRenders %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PRS */
+ "RenderContextGrows %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_GROWS */
+ "RenderContextPushGrows %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS */
+ "RenderContextTAStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES */
+ "RenderContext3DStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES */
+ "RenderContextSHStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES */
+ "RenderContextCDMStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES */
+ "ZSBufferRequestsByApp %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP */
+ "ZSBufferRequestsByFirmware %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW */
+ "FreeListGrowRequestsByApp %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP */
+ "FreeListGrowRequestsByFirmware %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW */
+ "FreeListInitialPages %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT */
+ "FreeListMaxPages %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES */
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ "MemoryUsageKMalloc %10d\n", /* PVRSRV_STAT_TYPE_KMALLOC */
+ "MemoryUsageKMallocMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_KMALLOC */
+ "MemoryUsageVMalloc %10d\n", /* PVRSRV_STAT_TYPE_VMALLOC */
+ "MemoryUsageVMallocMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMALLOC */
+#else
+ "","","","", /* Empty strings if these stats are not logged */
+#endif
+ "MemoryUsageAllocPTMemoryUMA %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_UMA */
+ "MemoryUsageAllocPTMemoryUMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_UMA */
+ "MemoryUsageVMapPTUMA %10d\n", /* PVRSRV_STAT_TYPE_VMAP_PT_UMA */
+ "MemoryUsageVMapPTUMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMAP_PT_UMA */
+ "MemoryUsageAllocPTMemoryLMA %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_LMA */
+ "MemoryUsageAllocPTMemoryLMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_LMA */
+ "MemoryUsageIORemapPTLMA %10d\n", /* PVRSRV_STAT_TYPE_IOREMAP_PT_LMA */
+ "MemoryUsageIORemapPTLMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_IOREMAP_PT_LMA */
+ "MemoryUsageAllocGPUMemLMA %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_LMA_PAGES */
+ "MemoryUsageAllocGPUMemLMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_LMA_PAGES */
+ "MemoryUsageAllocGPUMemUMA %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_UMA_PAGES */
+ "MemoryUsageAllocGPUMemUMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_UMA_PAGES */
+ "MemoryUsageMappedGPUMemUMA/LMA %10d\n", /* PVRSRV_STAT_TYPE_MAP_UMA_LMA_PAGES */
+ "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_MAP_UMA_LMA_PAGES */
+};
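+/* Note: pszProcessStatFmt is indexed directly by PVRSRV_PROCESS_STAT_TYPE, so
+ * the entries above must stay in the same order as the enum; the empty strings
+ * keep the indices aligned when the kmalloc/vmalloc stats are compiled out. */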
+
+/* structure used in hash table to track statistic entries */
+typedef struct {
+ size_t uiSizeInBytes;
+ IMG_PID uiPid;
+} _PVR_STATS_TRACKING_HASH_ENTRY;
+
+/* Function used internally to decrement tracked per-process statistic entries */
+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+ PVRSRV_MEM_ALLOC_TYPE eAllocType);
+
+/*
+ * Functions for printing the information stored...
+ */
+void ProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+void MemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void RIMemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void PowerStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void GlobalStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void CacheOpStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars( IMG_CHAR *psStr);
+#endif
+
+/*
+ * Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while(0)
+#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while(0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,val) do { (var) += (val); if ((var) > (var##Max)) {(var##Max) = (var);} } while(0)
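+/* For example, INCREASE_STAT_VALUE(psStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, 16)
+ * adds 16 to the KMALLOC stat and, via the var##_MAX token-pasting, also raises
+ * PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX if a new high-water mark is reached, so
+ * these macros only work for stats that have a matching *_MAX enum entry. */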
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Allow stats to go negative */
+#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,val) do { (var) -= (val); } while(0)
+#else
+#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,val) do { if ((var) >= (val)) { (var) -= (val); } else { (var) = 0; } } while(0)
+#endif
+#define MAX_CACHEOP_STAT 16
+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) (((x)+1) >= MAX_CACHEOP_STAT ? 0 : ((x)+1))
+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) (((x)-1) < 0 ? (MAX_CACHEOP_STAT-1) : ((x)-1))
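+/* The wrap macros treat asCacheOp[] as a ring buffer of MAX_CACHEOP_STAT
+ * entries, e.g. INCREMENT_CACHEOP_STAT_IDX_WRAP(15) yields 0 and
+ * DECREMENT_CACHEOP_STAT_IDX_WRAP(0) yields 15. */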
+
+/*
+ * Structures for holding statistics...
+ */
+typedef enum
+{
+ PVRSRV_STAT_STRUCTURE_PROCESS = 1,
+ PVRSRV_STAT_STRUCTURE_RENDER_CONTEXT = 2,
+ PVRSRV_STAT_STRUCTURE_MEMORY = 3,
+ PVRSRV_STAT_STRUCTURE_RIMEMORY = 4,
+ PVRSRV_STAT_STRUCTURE_CACHEOP = 5
+} PVRSRV_STAT_STRUCTURE_TYPE;
+
+#define MAX_PROC_NAME_LENGTH (32)
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* Linked list pointers */
+ struct _PVRSRV_PROCESS_STATS_* psNext;
+ struct _PVRSRV_PROCESS_STATS_* psPrev;
+
+ /* Per-process lock that must be held when
+ * modifying this structure's members */
+ POS_LOCK hLock;
+
+ /* OS level process ID */
+ IMG_PID pid;
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32MemRefCount;
+
+ /* Folder name used to store the statistic */
+ IMG_CHAR szFolderName[MAX_PROC_NAME_LENGTH];
+
+ /* OS specific data */
+ void *pvOSPidFolderData;
+ void *pvOSPidEntryData;
+
+ /* Stats... */
+ IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ IMG_UINT32 ui32StatAllocFlags;
+
+#if defined(DEBUG)
+ struct _CACHEOP_STRUCT_ {
+ PVRSRV_CACHE_OP uiCacheOp;
+#if defined(PVR_RI_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr;
+ RGXFWIF_DM eFenceOpType;
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bRangeBasedFlush;
+ IMG_BOOL bUserModeFlush;
+ IMG_UINT32 ui32OpSeqNum;
+ IMG_BOOL bHasTimeline;
+ IMG_BOOL bIsFence;
+ IMG_PID ownerPid;
+ } asCacheOp[MAX_CACHEOP_STAT];
+ IMG_INT32 uiCacheOpWriteIndex;
+ struct _PVRSRV_CACHEOP_STATS_* psCacheOpStats;
+#endif
+
+ /* Other statistics structures */
+ struct _PVRSRV_MEMORY_STATS_* psMemoryStats;
+ struct _PVRSRV_RI_MEMORY_STATS_* psRIMemoryStats;
+} PVRSRV_PROCESS_STATS;
+
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+ PVRSRV_MEM_ALLOC_TYPE eAllocType;
+ IMG_UINT64 ui64Key;
+ void *pvCpuVAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ size_t uiBytes;
+ void *pvPrivateData;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ void *pvAllocdFromFile;
+ IMG_UINT32 ui32AllocdFromLine;
+#endif
+ IMG_PID pid;
+ struct _PVRSRV_MEM_ALLOC_REC_ *psNext;
+ struct _PVRSRV_MEM_ALLOC_REC_ **ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
+
+typedef struct _PVRSRV_MEMORY_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS specific data */
+ void *pvOSMemEntryData;
+
+ /* Stats... */
+ PVRSRV_MEM_ALLOC_REC *psMemoryRecords;
+} PVRSRV_MEMORY_STATS;
+
+typedef struct _PVRSRV_RI_MEMORY_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS level process ID */
+ IMG_PID pid;
+
+ /* OS specific data */
+ void *pvOSRIMemEntryData;
+} PVRSRV_RI_MEMORY_STATS;
+
+typedef struct _PVRSRV_CACHEOP_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS specific data */
+ void *pvOSCacheOpEntryData;
+} PVRSRV_CACHEOP_STATS;
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+/*
+ * Global Boolean to flag when the statistics are ready to monitor
+ * memory allocations.
+ */
+static IMG_BOOL bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS* g_psLiveList = NULL;
+static PVRSRV_PROCESS_STATS* g_psDeadList = NULL;
+
+static POS_LOCK g_psLinkedListLock = NULL;
+/* The lockdep feature in the kernel cannot differentiate between different instances of the
+ * same lock type, so it groups all such instances under one class. The consequence is that,
+ * if lock acquisition is nested on different instances, it generates a false warning about a
+ * possible deadlock due to recursive lock acquisition. Hence we create the following
+ * subclasses to explicitly apprise lockdep that such lock nesting is safe. */
+#define PROCESS_LOCK_SUBCLASS_CURRENT 1
+#define PROCESS_LOCK_SUBCLASS_PREV 2
+#define PROCESS_LOCK_SUBCLASS_NEXT 3
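+/* Typical nesting pattern these subclasses describe: when relinking a node the
+ * code first takes that node's lock with PROCESS_LOCK_SUBCLASS_CURRENT and then
+ * a neighbouring node's lock with PROCESS_LOCK_SUBCLASS_PREV/NEXT, e.g. in
+ * _AddProcessStatsToFrontOfLiveList() and _RemoveProcessStatsFromList(). */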
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+static IMG_CHAR *pszOSLivePidFolderName = "pid";
+static IMG_CHAR *pszOSDeadPidFolderName = "pids_retired";
+static void *pvOSLivePidFolder = NULL;
+static void *pvOSDeadPidFolder = NULL;
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *pvOSProcStats = NULL;
+#endif
+
+/* global driver-data folders */
+typedef struct _GLOBAL_STATS_
+{
+ IMG_UINT32 ui32MemoryUsageKMalloc;
+ IMG_UINT32 ui32MemoryUsageKMallocMax;
+ IMG_UINT32 ui32MemoryUsageVMalloc;
+ IMG_UINT32 ui32MemoryUsageVMallocMax;
+ IMG_UINT32 ui32MemoryUsageAllocPTMemoryUMA;
+ IMG_UINT32 ui32MemoryUsageAllocPTMemoryUMAMax;
+ IMG_UINT32 ui32MemoryUsageVMapPTUMA;
+ IMG_UINT32 ui32MemoryUsageVMapPTUMAMax;
+ IMG_UINT32 ui32MemoryUsageAllocPTMemoryLMA;
+ IMG_UINT32 ui32MemoryUsageAllocPTMemoryLMAMax;
+ IMG_UINT32 ui32MemoryUsageIORemapPTLMA;
+ IMG_UINT32 ui32MemoryUsageIORemapPTLMAMax;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemLMA;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemLMAMax;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemUMA;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAMax;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAPool;
+ IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAPoolMax;
+ IMG_UINT32 ui32MemoryUsageMappedGPUMemUMA_LMA;
+ IMG_UINT32 ui32MemoryUsageMappedGPUMemUMA_LMAMax;
+ POS_LOCK hGlobalStatsLock;
+} GLOBAL_STATS;
+
+static void *pvOSGlobalMemEntryRef = NULL;
+static IMG_CHAR* const pszDriverStatFilename = "driver_stats";
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsSizeTrackingHashTable;
+static POS_LOCK gpsSizeTrackingHashTableLock;
+
+static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr);
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr);
+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _RemoveOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _CreateOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats, void *pvOSPidFolder);
+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ PVRSRV_PROCESS_STATS* psProcessStats,
+ IMG_UINT32 uiBytes);
+/*
+ * Power statistics related definitions
+ */
+
+/* For the time being, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+ ((time) > 0 ? MEAN_TIME((time),(newtime)) : (newtime))
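+/* Worked example (integer arithmetic): UPDATE_TIME(0, 120) seeds the stat with
+ * 120, and a later UPDATE_TIME(120, 40) gives MEAN_TIME(120, 40) = 90 + 10 = 100.
+ * Each term is truncated separately, so small samples round down slightly. */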
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+ DEVICE = 0,
+ SYSTEM = 1,
+ POST_POWER = 0,
+ PRE_POWER = 2,
+ POWER_OFF = 0,
+ POWER_ON = 4,
+ NOT_FORCED = 0,
+ FORCED = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+ ((forced) + (powon) + (prepow) + (system))
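+/* The enum values act as bit flags, so the index is a 4-bit field:
+ * bit0 = device/system, bit1 = post/pre power, bit2 = power-off/on, bit3 = forced.
+ * e.g. GET_POWER_STAT_INDEX(FORCED, POWER_ON, PRE_POWER, SYSTEM) = 8+4+2+1 = 15,
+ * the last slot of aui32PowerTimingStats[NUM_POWER_STATS]. */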
+
+/* For the power timing stats we need 16 variables to store all the
+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power
+ * and device/system statistics
+ */
+#define NUM_POWER_STATS (16)
+static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
+
+static void *pvOSPowerStatsEntryData = NULL;
+
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+ IMG_UINT32 *pui32Stat;
+ IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+ IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+ IMG_UINT32 ui32Index;
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ DEVICE);
+ pui32Stat = &aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ SYSTEM);
+ pui32Stat = &aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+typedef struct _EXTRA_POWER_STATS_
+{
+ IMG_UINT64 ui64PreClockSpeedChangeDuration;
+ IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration;
+ IMG_UINT64 ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+#define NUM_EXTRA_POWER_STATS 10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32 ui32ClockSpeedIndexStart = 0, ui32ClockSpeedIndexEnd = 0;
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark = 0;
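+/* asClockSpeedChanges[] is used as a small ring buffer: the ...ExtraPre() call
+ * records the pre-change duration at ui32ClockSpeedIndexEnd, the ...ExtraPost()
+ * call completes the entry and advances the end index, and when the end index
+ * catches the start index the start index is advanced so the oldest record is
+ * overwritten. */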
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+ ui64PreClockSpeedChangeMark = OSClockus();
+
+ return;
+}
+
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+ IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+ PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+ ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+ if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+ {
+ ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+ }
+
+ ui64PreClockSpeedChangeMark = 0;
+
+ return;
+}
+
+/*************************************************************************/ /*!
+@Function _FindProcessStatsInLiveList
+@Description Searches the Live Process List for a statistics structure that
+ matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats->pid == pid)
+ {
+ return psProcessStats;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ return NULL;
+} /* _FindProcessStatsInLiveList */
+
+/*************************************************************************/ /*!
+@Function _FindProcessStatsInDeadList
+@Description Searches the Dead Process List for a statistics structure that
+ matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats->pid == pid)
+ {
+ return psProcessStats;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ return NULL;
+} /* _FindProcessStatsInDeadList */
+
+/*************************************************************************/ /*!
+@Function _FindProcessStats
+@Description Searches the Live and Dead Process Lists for a statistics
+ structure that matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid);
+
+ if (psProcessStats == NULL)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(pid);
+ }
+
+ return psProcessStats;
+} /* _FindProcessStats */
+
+/*************************************************************************/ /*!
+@Function _CompressMemoryUsage
+@Description Reduces memory usage by deleting old statistics data.
+ This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static void
+_CompressMemoryUsage(void)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed;
+ IMG_UINT32 ui32ItemsRemaining;
+
+ /*
+ * We hold the lock whilst checking the list, but we'll release it
+ * before freeing memory (as that will require the lock too)!
+ */
+ OSLockAcquire(g_psLinkedListLock);
+
+ /* Check that the dead list is not bigger than the max size... */
+ psProcessStats = g_psDeadList;
+ psProcessStatsToBeFreed = NULL;
+ ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES;
+
+ while (psProcessStats != NULL && ui32ItemsRemaining > 0)
+ {
+ ui32ItemsRemaining--;
+ if (ui32ItemsRemaining == 0)
+ {
+ /* This is the last allowed process, cut the linked list here! */
+ psProcessStatsToBeFreed = psProcessStats->psNext;
+ psProcessStats->psNext = NULL;
+ }
+ else
+ {
+ psProcessStats = psProcessStats->psNext;
+ }
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Any processes stats remaining will need to be destroyed... */
+ while (psProcessStatsToBeFreed != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+ psProcessStatsToBeFreed->psNext = NULL;
+ _RemoveOSStatisticEntries(psProcessStatsToBeFreed);
+ psProcessStatsToBeFreed = psNextProcessStats;
+ }
+} /* _CompressMemoryUsage */
+
+/* These functions move the process stats from the live to the dead list.
+ * _MoveProcessToDeadList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToDeadListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Take the element out of the live list and append to the dead list... */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+static void
+_MoveProcessToDeadListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Transfer the OS entries to the folder for dead processes... */
+ _RemoveOSStatisticEntries(psProcessStats);
+ _CreateOSStatisticEntries(psProcessStats, pvOSDeadPidFolder);
+} /* _MoveProcessToDeadListDebugFS */
+
+/* These functions move the process stats from the dead to the live list.
+ * _MoveProcessToLiveList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToLiveListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Take the element out of the dead list and add to the front of the live list... */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+} /* _MoveProcessToLiveList */
+
+static void
+_MoveProcessToLiveListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Transfer the OS entries to the folder for live processes... */
+ _RemoveOSStatisticEntries(psProcessStats);
+ _CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+} /* _MoveProcessToLiveListDebugFS */
+
+/*************************************************************************/ /*!
+@Function _AddProcessStatsToFrontOfLiveList
+@Description Add a statistic to the live list head.
+@Input psProcessStats Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* This function should always be called under global list lock g_psLinkedListLock.
+ */
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ if (g_psLiveList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psLiveList);
+ OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psLiveList->psPrev = psProcessStats;
+ OSLockRelease(g_psLiveList->hLock);
+ psProcessStats->psNext = g_psLiveList;
+ }
+
+ g_psLiveList = psProcessStats;
+
+ OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfLiveList */
+
+/*************************************************************************/ /*!
+@Function _AddProcessStatsToFrontOfDeadList
+@Description Add a statistic to the dead list head.
+@Input psProcessStats Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ if (g_psDeadList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psDeadList);
+ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psDeadList->psPrev = psProcessStats;
+ OSLockRelease(g_psDeadList->hLock);
+ psProcessStats->psNext = g_psDeadList;
+ }
+
+ g_psDeadList = psProcessStats;
+
+ OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfDeadList */
+
+/*************************************************************************/ /*!
+@Function _RemoveProcessStatsFromList
+@Description Detaches a process from either the live or dead list.
+@Input psProcessStats Process stats to remove.
+*/ /**************************************************************************/
+static void
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Remove the item from the linked lists... */
+ if (g_psLiveList == psProcessStats)
+ {
+ g_psLiveList = psProcessStats->psNext;
+
+ if (g_psLiveList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psLiveList);
+ OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psLiveList->psPrev = NULL;
+ OSLockRelease(g_psLiveList->hLock);
+
+ }
+ }
+ else if (g_psDeadList == psProcessStats)
+ {
+ g_psDeadList = psProcessStats->psNext;
+
+ if (g_psDeadList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psDeadList);
+ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psDeadList->psPrev = NULL;
+ OSLockRelease(g_psDeadList->hLock);
+ }
+ }
+ else
+ {
+ PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
+ PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;
+
+ if (psProcessStats->psNext != NULL)
+ {
+ PVR_ASSERT(psProcessStats != psNext);
+ OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
+ psProcessStats->psNext->psPrev = psPrev;
+ OSLockRelease(psNext->hLock);
+ }
+ if (psProcessStats->psPrev != NULL)
+ {
+ PVR_ASSERT(psProcessStats != psPrev);
+ OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ psProcessStats->psPrev->psNext = psNext;
+ OSLockRelease(psPrev->hLock);
+ }
+ }
+
+
+ /* Reset the pointers in this cell, as it is not attached to anything */
+ psProcessStats->psNext = NULL;
+ psProcessStats->psPrev = NULL;
+
+ OSLockRelease(psProcessStats->hLock);
+
+} /* _RemoveProcessStatsFromList */
+
+/*************************************************************************/ /*!
+@Function _CreateOSStatisticEntries
+@Description Create all OS entries for this statistic.
+@Input psProcessStats Process stats to create OS entries for.
+@Input pvOSPidFolder Pointer to OS folder to place the entries in.
+*/ /**************************************************************************/
+static void
+_CreateOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats,
+ void *pvOSPidFolder)
+{
+ void *pvOSPidFolderData;
+ void *pvOSPidEntryData;
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ void *pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+ void *pvOSRIMemEntryData;
+#endif
+#if defined(DEBUG)
+ void *pvOSCacheOpEntryData;
+#endif
+
+ PVR_ASSERT(psProcessStats != NULL);
+
+ pvOSPidFolderData = OSCreateStatisticFolder(psProcessStats->szFolderName, pvOSPidFolder);
+ pvOSPidEntryData = OSCreateStatisticEntry("process_stats",
+ pvOSPidFolderData,
+ ProcessStatsPrintElements,
+ _PVRSRVIncrMemStatRefCount,
+ _PVRSRVDecrMemStatRefCount,
+ (void *) psProcessStats);
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ pvOSMemEntryData = OSCreateStatisticEntry("mem_area",
+ pvOSPidFolderData,
+ MemStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats->psMemoryStats);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ pvOSRIMemEntryData = OSCreateStatisticEntry("ri_mem_area",
+ pvOSPidFolderData,
+ RIMemStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats->psRIMemoryStats);
+#endif
+
+#if defined(DEBUG)
+ pvOSCacheOpEntryData = OSCreateStatisticEntry("cache_ops_exec",
+ pvOSPidFolderData,
+ CacheOpStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats);
+#endif
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ psProcessStats->pvOSPidFolderData = pvOSPidFolderData;
+ psProcessStats->pvOSPidEntryData = pvOSPidEntryData;
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats->pvOSMemEntryData = pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats->pvOSRIMemEntryData = pvOSRIMemEntryData;
+#endif
+#if defined(DEBUG)
+ psProcessStats->psCacheOpStats->pvOSCacheOpEntryData = pvOSCacheOpEntryData;
+#endif
+
+ OSLockRelease(psProcessStats->hLock);
+} /* _CreateOSStatisticEntries */
+
+/*************************************************************************/ /*!
+@Function _RemoveOSStatisticEntries
+@Description Removes all OS entries used by this statistic.
+@Input psProcessStats Process stats whose OS entries are to be removed.
+*/ /**************************************************************************/
+static void
+_RemoveOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+#if defined(DEBUG)
+ OSRemoveStatisticEntry(psProcessStats->psCacheOpStats->pvOSCacheOpEntryData);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ OSRemoveStatisticEntry(psProcessStats->psRIMemoryStats->pvOSRIMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ OSRemoveStatisticEntry(psProcessStats->psMemoryStats->pvOSMemEntryData);
+#endif
+
+ if (psProcessStats->pvOSPidEntryData != NULL)
+ {
+ OSRemoveStatisticEntry(psProcessStats->pvOSPidEntryData);
+ }
+ if (psProcessStats->pvOSPidFolderData != NULL)
+ {
+ OSRemoveStatisticFolder(&psProcessStats->pvOSPidFolderData);
+ }
+
+} /* _RemoveOSStatisticEntries */
+
+/*************************************************************************/ /*!
+@Function _DestroyProcessStat
+@Description Frees memory and resources held by a process statistic.
+@Input psProcessStats Process stats to destroy.
+*/ /**************************************************************************/
+static void
+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ while (psProcessStats->psMemoryStats->psMemoryRecords)
+ {
+ List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryStats->psMemoryRecords);
+ }
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+#endif
+#if defined(PVR_RI_DEBUG)
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+#endif
+ OSLockRelease(psProcessStats->hLock);
+
+ /* Destroy the lock */
+ OSLockDestroyNoStats(psProcessStats->hLock);
+
+ /* Free the memory... */
+ OSFreeMemNoStats(psProcessStats);
+} /* _DestroyProcessStat */
+
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32Res = 0;
+
+ switch (*peStructureType)
+ {
+ case PVRSRV_STAT_STRUCTURE_PROCESS:
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ ui32Res = ++psProcessStats->ui32MemRefCount;
+ OSLockRelease(psProcessStats->hLock);
+ break;
+ }
+ default:
+ {
+ /* _PVRSRVIncrMemStatRefCount was passed a pointer to an unrecognised struct */
+ PVR_ASSERT(0);
+ break;
+ }
+ }
+
+ return ui32Res;
+}
+
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32Res = 0;
+
+ switch (*peStructureType)
+ {
+ case PVRSRV_STAT_STRUCTURE_PROCESS:
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ /* Decrement stat memory refCount and free if now zero */
+ ui32Res = --psProcessStats->ui32MemRefCount;
+ OSLockRelease(psProcessStats->hLock);
+ if (ui32Res == 0)
+ {
+ _DestroyProcessStat(psProcessStats);
+ }
+ break;
+ }
+ default:
+ {
+ /* _PVRSRVDecrMemStatRefCount was passed a pointer to an unrecognised struct */
+ PVR_ASSERT(0);
+ break;
+ }
+ }
+ return ui32Res;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsInitialise
+@Description Entry point for initialising the statistics module.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(void)
+{
+ PVRSRV_ERROR error;
+
+ PVR_ASSERT(g_psLiveList == NULL);
+ PVR_ASSERT(g_psDeadList == NULL);
+ PVR_ASSERT(g_psLinkedListLock == NULL);
+ PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
+ PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+ /* We need a lock to protect the linked lists... */
+ error = OSLockCreate(&g_psLinkedListLock, LOCK_TYPE_NONE);
+ if (error == PVRSRV_OK)
+ {
+ /* We also need a lock to protect the hash table used for size tracking.. */
+ error = OSLockCreate(&gpsSizeTrackingHashTableLock, LOCK_TYPE_NONE);
+
+ if (error != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* We also need a lock to protect the GlobalStat counters */
+ error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock, LOCK_TYPE_NONE);
+ if (error != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ /* Create the PID folders for putting the PID files in... */
+ pvOSLivePidFolder = OSCreateStatisticFolder(pszOSLivePidFolderName, NULL);
+ pvOSDeadPidFolder = OSCreateStatisticFolder(pszOSDeadPidFolderName, NULL);
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+ pvOSProcStats = OSCreateRawStatisticEntry("memtrack_stats", NULL,
+ RawProcessStatsPrintElements);
+#endif
+
+ /* Create power stats entry... */
+ pvOSPowerStatsEntryData = OSCreateStatisticEntry("power_timing_stats",
+ NULL,
+ PowerStatsPrintElements,
+ NULL,
+ NULL,
+ NULL);
+
+ pvOSGlobalMemEntryRef = OSCreateStatisticEntry(pszDriverStatFilename,
+ NULL,
+ GlobalStatsPrintElements,
+ NULL,
+ NULL,
+ NULL);
+
+ /* Flag that we are ready to start monitoring memory allocations. */
+
+ gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
+
+ OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+
+ bProcessStatsInitialised = IMG_TRUE;
+ }
+ return error;
+e1:
+ OSLockDestroy(gpsSizeTrackingHashTableLock);
+ gpsSizeTrackingHashTableLock = NULL;
+e0:
+ OSLockDestroy(g_psLinkedListLock);
+ g_psLinkedListLock = NULL;
+ return error;
+
+} /* PVRSRVStatsInitialise */
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsDestroy
+@Description Method for destroying the statistics module data.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDestroy(void)
+{
+ PVR_ASSERT(bProcessStatsInitialised == IMG_TRUE);
+
+ /* Stop monitoring memory allocations... */
+ bProcessStatsInitialised = IMG_FALSE;
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+ if (pvOSProcStats)
+ {
+ OSRemoveRawStatisticEntry(pvOSProcStats);
+ pvOSProcStats = NULL;
+ }
+#endif
+
+ /* Destroy the power stats entry... */
+ if (pvOSPowerStatsEntryData != NULL)
+ {
+ OSRemoveStatisticEntry(pvOSPowerStatsEntryData);
+ pvOSPowerStatsEntryData = NULL;
+ }
+
+ /* Destroy the global data entry */
+ if (pvOSGlobalMemEntryRef != NULL)
+ {
+ OSRemoveStatisticEntry(pvOSGlobalMemEntryRef);
+ pvOSGlobalMemEntryRef = NULL;
+ }
+
+ /* Destroy the locks... */
+ if (g_psLinkedListLock != NULL)
+ {
+ OSLockDestroy(g_psLinkedListLock);
+ g_psLinkedListLock = NULL;
+ }
+
+ /* Free the live and dead lists... */
+ while (g_psLiveList != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+ _RemoveProcessStatsFromList(psProcessStats);
+ _RemoveOSStatisticEntries(psProcessStats);
+ }
+
+ while (g_psDeadList != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+ _RemoveProcessStatsFromList(psProcessStats);
+ _RemoveOSStatisticEntries(psProcessStats);
+ }
+
+ /* Remove the OS folders used to hold the PID folders...
+ * OSRemoveStatisticFolder will NULL the pointers */
+ OSRemoveStatisticFolder(&pvOSLivePidFolder);
+ OSRemoveStatisticFolder(&pvOSDeadPidFolder);
+
+ if (gpsSizeTrackingHashTable != NULL)
+ {
+ HASH_Delete(gpsSizeTrackingHashTable);
+ }
+ if (gpsSizeTrackingHashTableLock != NULL)
+ {
+ OSLockDestroy(gpsSizeTrackingHashTableLock);
+ gpsSizeTrackingHashTableLock = NULL;
+ }
+
+ if (gsGlobalStats.hGlobalStatsLock != NULL)
+ {
+ OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+ gsGlobalStats.hGlobalStatsLock = NULL;
+ }
+
+} /* PVRSRVStatsDestroy */
+
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageKMalloc, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMalloc, uiBytes);
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMapPTUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageIORemapPTLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool, uiBytes);
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageKMalloc, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMalloc, uiBytes);
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMapPTUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageIORemapPTLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool, uiBytes);
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsRegisterProcess
+@Description Registers a process with the statistics list.
+@Output phProcessStats Handle to the process to be used to deregister.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats=NULL;
+ PVRSRV_ERROR eError;
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ IMG_BOOL bMoveProcess = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+#endif
+
+ PVR_ASSERT(phProcessStats != NULL);
+
+ /* Check the PID has not already moved to the dead list... */
+ OSLockAcquire(g_psLinkedListLock);
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ if (psProcessStats != NULL)
+ {
+ /* Move it back onto the live list! */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+ /* we can perform the OS operation out of lock */
+ bMoveProcess = IMG_TRUE;
+ }
+ else
+ {
+ /* Check the PID is not already registered in the live list... */
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ }
+
+ /* If the PID is on the live list then just increment the ref count and return... */
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ psProcessStats->ui32RefCount++;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+ UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+ OSLockRelease(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+
+ *phProcessStats = psProcessStats;
+
+ /* Check if we need to perform any OS operation */
+ if (bMoveProcess)
+ {
+ /* Transfer the OS entries back to the folder for live processes... */
+ _RemoveOSStatisticEntries(psProcessStats);
+ _CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+ }
+
+ return PVRSRV_OK;
+ }
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Allocate a new node structure and initialise it... */
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ *phProcessStats = 0;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = currentPid;
+ psProcessStats->ui32RefCount = 1;
+ psProcessStats->ui32MemRefCount = 1;
+
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ goto e0;
+ }
+
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ goto e0;
+ }
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = currentPid;
+#endif
+
+#if defined(DEBUG)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ goto e0;
+ }
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list... */
+ OSLockAcquire(g_psLinkedListLock);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Create the process stat in the OS... */
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", currentPid, acFolderName);
+#else
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d", currentPid);
+#endif
+ _CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+
+ /* Done */
+ *phProcessStats = (IMG_HANDLE) psProcessStats;
+
+ return PVRSRV_OK;
+
+e0:
+ OSFreeMemNoStats(psProcessStats);
+ *phProcessStats = 0;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+} /* PVRSRVStatsRegisterProcess */
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsDeregisterProcess
+@Input hProcessStats Handle to the process returned when registered.
+@Description Method for deregistering a process from the statistics module.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+ IMG_BOOL bMoveProcess = IMG_FALSE;
+
+ if (hProcessStats != 0)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+ /* Lower the reference count, if zero then move it to the dead list */
+ OSLockAcquire(g_psLinkedListLock);
+ if (psProcessStats->ui32RefCount > 0)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->ui32RefCount--;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (psProcessStats->ui32RefCount == 0)
+ {
+ OSLockRelease(psProcessStats->hLock);
+ _MoveProcessToDeadList(psProcessStats);
+ bMoveProcess = IMG_TRUE;
+ } else
+#endif
+ {
+ OSLockRelease(psProcessStats->hLock);
+ }
+ }
+ OSLockRelease(g_psLinkedListLock);
+
+ /* The OS calls need to be performed without g_psLinkedListLock */
+ if (bMoveProcess == IMG_TRUE)
+ {
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+ }
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+} /* PVRSRVStatsDeregisterProcess */
+
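+/*
+ * Note on the build-dependent prototype below: when both
+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS and DEBUG are defined, the public
+ * PVRSRVStatsAddMemAllocRecord() becomes a thin wrapper that forwards to
+ * _PVRSRVStatsAddMemAllocRecord() with no file/line information, and the
+ * body that follows the #endif implements the extended variant. In all
+ * other builds the body belongs directly to the public function.
+ */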
+void
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+{
+ _PVRSRVStatsAddMemAllocRecord(eAllocType, pvCpuVAddr, sCpuPAddr, uiBytes, pvPrivateData, NULL, 0);
+}
+void
+_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+#endif
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL;
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ PVRSRV_MEMORY_STATS* psMemoryStats;
+ IMG_BOOL bResurrectProcess = IMG_FALSE;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /*
+ * To prevent a recursive loop, we make the memory allocations for our
+ * memstat records via OSAllocZMemNoStats(), which does not try to
+ * create a memstat record entry.
+ */
+
+ /* Allocate the memory record... */
+ psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC));
+ if (psRecord == NULL)
+ {
+ return;
+ }
+
+ psRecord->eAllocType = eAllocType;
+ psRecord->pvCpuVAddr = pvCpuVAddr;
+ psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+ psRecord->uiBytes = uiBytes;
+ psRecord->pvPrivateData = pvPrivateData;
+
+ psRecord->pid = currentPid;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ psRecord->pvAllocdFromFile = pvAllocFromFile;
+ psRecord->ui32AllocdFromLine = ui32AllocFromLine;
+#endif
+
+ _increase_global_stat(eAllocType, uiBytes);
+ /* Lock while we find the correct process... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ bResurrectProcess = IMG_TRUE;
+ }
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ bResurrectProcess = IMG_TRUE;
+ }
+ }
+
+ if (psProcessStats == NULL)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVRSRV_ERROR eError;
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName));
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSFreeMemNoStats(psRecord);
+ return;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = currentPid;
+ psProcessStats->ui32RefCount = 1;
+ psProcessStats->ui32MemRefCount = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ goto e0;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ psProcessStats->hLock = NULL;
+ goto e0;
+ }
+
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ psProcessStats->hLock = NULL;
+ OSLockRelease(g_psLinkedListLock);
+ goto e0;
+ }
+
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = currentPid;
+#endif
+
+#if defined(DEBUG)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+ psProcessStats->hLock = NULL;
+ goto e0;
+ }
+
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list (while still holding the list lock)... */
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Create the process stat in the OS... */
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", currentPid, acFolderName);
+
+ _CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+ OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+ }
+ else
+ {
+ OSLockRelease(g_psLinkedListLock);
+ }
+
+ if (psProcessStats == NULL)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_ERROR, "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", __FUNCTION__, currentPid, OSGetCurrentProcessName(), uiBytes));
+#endif
+ if (psRecord != NULL)
+ {
+ OSFreeMemNoStats(psRecord);
+ }
+ return;
+ }
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ psMemoryStats = psProcessStats->psMemoryStats;
+
+ /* Insert the memory record... */
+ if (psRecord != NULL)
+ {
+ List_PVRSRV_MEM_ALLOC_REC_Insert(&psMemoryStats->psMemoryRecords, psRecord);
+ }
+
+ /* Update the memory watermarks... */
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+ OSLockRelease(psProcessStats->hLock);
+ if (bResurrectProcess)
+ {
+ /* Move process from dead list to live list */
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ _MoveProcessToLiveListDebugFS(psProcessStats);
+ }
+ return;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+e0:
+ OSFreeMemNoStats(psRecord);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+#endif
+#endif
+} /* PVRSRVStatsAddMemAllocRecord */
+
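+/*
+ * Removal first looks for the record under the calling process (or the
+ * process currently being cleaned up), then falls back to scanning every
+ * live and dead process, since an allocation may be freed by a different
+ * process than the one it was recorded against.
+ */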
+void
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 ui64Key)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ PVRSRV_MEMORY_STATS* psMemoryStats = NULL;
+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL;
+ IMG_BOOL bFound = IMG_FALSE;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and remove this record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ if (psProcessStats != NULL)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ /* If not found, we need to do a full search in case it was allocated to a different PID... */
+ if (!bFound)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats;
+
+ /* Search all live lists first... */
+ psProcessStats = g_psLiveList;
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats != psProcessStatsAlreadyChecked)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ if (bFound)
+ {
+ break;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ /* If not found, then search all dead lists next... */
+ if (!bFound)
+ {
+ psProcessStats = g_psDeadList;
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats != psProcessStatsAlreadyChecked)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ if (bFound)
+ {
+ break;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+ }
+ }
+
+ /* Update the watermark and remove this record...*/
+ if (bFound)
+ {
+ _decrease_global_stat(eAllocType, psRecord->uiBytes);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ psRecord->uiBytes);
+
+ List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+ OSLockRelease(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* If all stats are now zero, remove the entry for this thread */
+ if (psProcessStats->ui32StatAllocFlags == 0)
+ {
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToDeadList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+#endif
+ /*
+ * Free the record outside the lock so we don't deadlock and so we
+ * reduce the time the lock is held.
+ */
+ OSFreeMemNoStats(psRecord);
+ }
+ else
+ {
+ OSLockRelease(g_psLinkedListLock);
+ }
+
+#else
+PVR_UNREFERENCED_PARAMETER(eAllocType);
+PVR_UNREFERENCED_PARAMETER(ui64Key);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
+
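+/*
+ * The "...AndTrack"/"...AndUntrack" pair is used where the size of an
+ * allocation is not available at free time: the size and owning PID are
+ * stored in gpsSizeTrackingHashTable, keyed by the CPU virtual address,
+ * so the matching decrement can recover them later.
+ */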
+void
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_UINT64 uiCpuVAddr)
+{
+ IMG_BOOL bRes = IMG_FALSE;
+ _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
+
+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL) )
+ {
+ return;
+ }
+
+ /* Alloc untracked memory for the new hash table entry */
+ psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
+ if (psNewTrackingHashEntry)
+ {
+ /* Fill-in the size of the allocation and PID of the allocating process */
+ psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
+ psNewTrackingHashEntry->uiPid = OSGetCurrentClientProcessIDKM();
+ OSLockAcquire(gpsSizeTrackingHashTableLock);
+ /* Insert address of the new struct into the hash table */
+ bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
+ OSLockRelease(gpsSizeTrackingHashTableLock);
+ }
+
+ if (psNewTrackingHashEntry)
+ {
+ if (bRes)
+ {
+ PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!!", __FUNCTION__, __LINE__));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!!", __FUNCTION__, __LINE__));
+ }
+}
+
+void
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ IMG_BOOL bResurrectProcess = IMG_FALSE;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _increase_global_stat(eAllocType, uiBytes);
+ OSLockAcquire(g_psLinkedListLock);
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ bResurrectProcess = IMG_TRUE;
+ }
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ bResurrectProcess = IMG_TRUE;
+ }
+ }
+
+ if (psProcessStats == NULL)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVRSRV_ERROR eError;
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName));
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+
+ if (bProcessStatsInitialised)
+ {
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = currentPid;
+ psProcessStats->ui32RefCount = 1;
+ psProcessStats->ui32MemRefCount = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+ }
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+ }
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = currentPid;
+#endif
+
+#if defined(DEBUG)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+ }
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list... */
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+ /* Create the process stat in the OS... */
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", currentPid, acFolderName);
+
+ _CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+ }
+#else
+ OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+ }
+
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ /* Release the list lock as soon as we acquire the process lock;
+ * this ensures that, even if the process is on the dead list, the
+ * entry cannot be deleted or modified while we update it. */
+ OSLockRelease(g_psLinkedListLock);
+ /* Update the memory watermarks... */
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+ OSLockRelease(psProcessStats->hLock);
+
+ if (bResurrectProcess)
+ {
+ /* Move process from dead list to live list */
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ _MoveProcessToLiveListDebugFS(psProcessStats);
+ }
+ }
+}
+
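+/*
+ * Decrease the per-process watermark for the given allocation type and,
+ * when the value reaches zero, clear the corresponding bit in
+ * ui32StatAllocFlags so that an idle process entry can later be retired
+ * to the dead list.
+ */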
+static void
+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ PVRSRV_PROCESS_STATS* psProcessStats,
+ IMG_UINT32 uiBytes)
+{
+ switch (eAllocType)
+ {
+ #if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+ #else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+ #endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+
+}
+
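+/*
+ * Emits the live-list process statistics as comma-separated values: one
+ * header row followed by one row per process.
+ */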
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+{
+ PVRSRV_PROCESS_STATS *psProcessStats;
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: pfnOSStatsPrintf not set", __func__));
+ return;
+ }
+
+ pfnOSStatsPrintf(pvFile, "%s,%s,%s,%s,%s,%s\n",
+ "PID",
+ "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC
+ "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA
+ "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA
+ "MemoryUsageAllocGPUMemUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES
+ "MemoryUsageAllocGPUMemLMA" // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES
+ );
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = g_psLiveList;
+
+ while (psProcessStats != NULL)
+ {
+ pfnOSStatsPrintf(pvFile, "%d,%d,%d,%d,%d,%d\n",
+ psProcessStats->pid,
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES]
+ );
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* RawProcessStatsPrintElements */
+#endif
+
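+/*
+ * Decrement the kmalloc watermark on behalf of an explicit PID; unlike the
+ * other decrement paths this does not derive the process from the caller,
+ * presumably so memory can be accounted back to its original allocator
+ * when it is freed elsewhere.
+ */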
+void
+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+ IMG_PID decrPID)
+{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(decrPID);
+
+ if (psProcessStats != NULL)
+ {
+ /* Decrement the kmalloc memory stat... */
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+#endif
+}
+
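+/*
+ * Counterpart of the tracking path above: given a hash entry removed from
+ * gpsSizeTrackingHashTable, decrement both the global stat and the stat of
+ * the process recorded in the entry.
+ */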
+static void
+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+ PVRSRV_MEM_ALLOC_TYPE eAllocType)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid);
+
+ if (psProcessStats != NULL)
+ {
+ /* Decrement the memory stat... */
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ psTrackingHashEntry->uiSizeInBytes);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+}
+
+void
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 uiCpuVAddr)
+{
+ _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL;
+
+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL) )
+ {
+ return;
+ }
+
+ OSLockAcquire(gpsSizeTrackingHashTableLock);
+ psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr);
+ OSLockRelease(gpsSizeTrackingHashTableLock);
+ if (psTrackingHashEntry)
+ {
+ _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType);
+ OSFreeMemNoStats(psTrackingHashEntry);
+ }
+}
+
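+/*
+ * Plain decrement used when the caller already knows the size of the
+ * allocation being released; compare with
+ * PVRSRVStatsDecrMemAllocStatAndUntrack() above, which looks the size up
+ * in the tracking hash table.
+ */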
+void
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(eAllocType, uiBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+
+
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ /* Release the list lock as soon as we acquire the process lock;
+ * this ensures that, even if the process is on the dead list, the
+ * entry cannot be deleted or modified while we update it. */
+ OSLockRelease(g_psLinkedListLock);
+ /* Update the memory watermarks... */
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ uiBytes);
+ OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* If all stats are now zero, remove the entry for this thread */
+ if (psProcessStats->ui32StatAllocFlags == 0)
+ {
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToDeadList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+#endif
+ }
+ else
+ {
+ OSLockRelease(g_psLinkedListLock);
+ }
+}
+
+/* For now we do not want to expose the global stats API, so we wrap it in
+ * these pooled-page-specific functions. If the global stats ever need to be
+ * modified directly elsewhere, these should be replaced with more general
+ * accessors.
+ */
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
+{
+ _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
+{
+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+ IMG_UINT32 ui32TotalNumOutOfMemory,
+ IMG_UINT32 ui32NumTAStores,
+ IMG_UINT32 ui32Num3DStores,
+ IMG_UINT32 ui32NumSHStores,
+ IMG_UINT32 ui32NumCDMStores,
+ IMG_PID pidOwner)
+{
+ IMG_PID pidCurrent = pidOwner;
+
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(pidCurrent);
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES] += ui32NumSHStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+ OSLockRelease(psProcessStats->hLock);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Null process. Pid=%d", pidCurrent));
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+void
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+ IMG_UINT32 ui32NumReqByFW,
+ IMG_PID owner)
+{
+ IMG_PID currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner;
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW;
+ OSLockRelease(psProcessStats->hLock);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+void
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+ IMG_UINT32 ui32NumGrowReqByFW,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32NumHighPages,
+ IMG_PID ownerPid)
+{
+ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+
+ if (psProcessStats != NULL)
+ {
+ /* Avoid signed / unsigned mismatch which is flagged by some compilers */
+ IMG_INT32 a, b;
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW;
+
+ a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT];
+ b = (IMG_INT32)ui32InitFLPages;
+ UPDATE_MAX_VALUE(a, b);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT] = a;
+ ui32InitFLPages = (IMG_UINT32)b;
+
+ a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES];
+ b = (IMG_INT32)ui32NumHighPages;
+ UPDATE_MAX_VALUE(a, b);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES] = a;
+ ui32NumHighPages = (IMG_UINT32)b;
+ OSLockRelease(psProcessStats->hLock);
+
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
+/*************************************************************************/ /*!
+@Function ProcessStatsPrintElements
+@Description Prints all elements for this process statistic record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+ProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32StatNumber = 0;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* Loop through all the values and print them... */
+ while (ui32StatNumber < PVRSRV_PROCESS_STAT_TYPE_COUNT)
+ {
+ if (psProcessStats->ui32MemRefCount > 0)
+ {
+ pfnOSStatsPrintf(pvFile, pszProcessStatFmt[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber]);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->ui32MemRefCount=%d", __FUNCTION__, psProcessStats->ui32MemRefCount));
+ }
+ ui32StatNumber++;
+ }
+} /* ProcessStatsPrintElements */
+
+#if defined(DEBUG)
+/* Divide a number by 10 without a division instruction: approximate
+ * uiNum/10 with a shift-and-add series, then correct the result using
+ * the computed remainder. */
+static INLINE IMG_UINT64 DivBy10(IMG_UINT64 uiNum)
+{
+ IMG_UINT64 uiQuot;
+ IMG_UINT64 uiRem;
+
+ uiQuot = (uiNum >> 1) + (uiNum >> 2);
+ uiQuot = uiQuot + (uiQuot >> 4);
+ uiQuot = uiQuot + (uiQuot >> 8);
+ uiQuot = uiQuot + (uiQuot >> 16);
+ uiQuot = uiQuot >> 3;
+ uiRem = uiNum - (((uiQuot << 2) + uiQuot) << 1);
+
+ return uiQuot + (uiRem > 9);
+}
+
+void
+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+ IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 eFenceOpType,
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64ExecuteTime,
+ IMG_BOOL bRangeBasedFlush,
+ IMG_BOOL bUserModeFlush,
+ IMG_BOOL bHasTimeline,
+ IMG_BOOL bIsFence,
+ IMG_PID ownerPid)
+{
+ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+
+ if (psProcessStats != NULL)
+ {
+ IMG_INT32 Idx;
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Look-up next buffer write index */
+ Idx = psProcessStats->uiCacheOpWriteIndex;
+ psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx);
+
+ /* Store all CacheOp meta-data */
+ psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp;
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr;
+ psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType;
+#endif
+ psProcessStats->asCacheOp[Idx].uiOffset = uiOffset;
+ psProcessStats->asCacheOp[Idx].uiSize = uiSize;
+ psProcessStats->asCacheOp[Idx].bRangeBasedFlush = bRangeBasedFlush;
+ psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush;
+ psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime;
+ psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum;
+ psProcessStats->asCacheOp[Idx].bHasTimeline = bHasTimeline;
+ psProcessStats->asCacheOp[Idx].bIsFence = bIsFence;
+
+ OSLockRelease(psProcessStats->hLock);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateCacheOpStats */
+
+/*************************************************************************/ /*!
+@Function CacheOpStatsPrintElements
+@Description Prints all elements for this process statistic CacheOp record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+CacheOpStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode;
+ IMG_INT32 i32WriteIdx, i32ReadIdx;
+
+#if defined(PVR_RI_DEBUG)
+ #define CACHEOP_RI_PRINTF_HEADER \
+ "%-10s %-10s %-5s %-16s %-10s %-10s %-12s %-12s\n"
+ #define CACHEOP_RI_PRINTF_FENCE \
+ "%-10s %-10s %-5s %-16s %-10s %-10s %-12llu 0x%-10x\n"
+ #define CACHEOP_RI_PRINTF \
+ "%-10s %-10s %-5s 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#else
+ #define CACHEOP_PRINTF_HEADER \
+ "%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n"
+ #define CACHEOP_PRINTF_FENCE \
+ "%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n"
+ #define CACHEOP_PRINTF \
+ "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#endif
+
+ if (peStructureType == NULL ||
+ *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS ||
+ psProcessStats->psCacheOpStats->eStructureType != PVRSRV_STAT_STRUCTURE_CACHEOP)
+ {
+ PVR_ASSERT(peStructureType != NULL);
+ PVR_ASSERT(*peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+ PVR_ASSERT(psProcessStats->psCacheOpStats->eStructureType == PVRSRV_STAT_STRUCTURE_CACHEOP);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* File header info */
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_HEADER,
+#else
+ CACHEOP_PRINTF_HEADER,
+#endif
+ "CacheOp",
+ "Type",
+ "Mode",
+#if defined(PVR_RI_DEBUG)
+ "DevVAddr",
+#endif
+ "Offset",
+ "Size",
+ "Time (us)",
+ "SeqNo");
+
+ /* Take a snapshot of the write index, then read backwards through the
+ circular buffer, wrapping round at the boundary */
+ i32WriteIdx = psProcessStats->uiCacheOpWriteIndex;
+ for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx);
+ i32ReadIdx != i32WriteIdx;
+ i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx))
+ {
+ IMG_UINT64 ui64ExecuteTime;
+
+ if (! psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum)
+ {
+ break;
+ }
+
+ /* Convert nano-seconds to micro-seconds */
+ ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime;
+ ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+
+ if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence)
+ {
+ IMG_CHAR *pszFenceType = "";
+ pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG)
+ switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType)
+ {
+ case RGXFWIF_DM_GP:
+ pszFenceType = "GP";
+ break;
+
+ case RGXFWIF_DM_TDM:
+ /* Also case RGXFWIF_DM_2D: */
+ pszFenceType = "TDM/2D";
+ break;
+
+ case RGXFWIF_DM_TA:
+ pszFenceType = "TA";
+ break;
+
+ case RGXFWIF_DM_3D:
+ pszFenceType = "3D";
+ break;
+
+ case RGXFWIF_DM_CDM:
+ pszFenceType = "CDM";
+ break;
+
+ case RGXFWIF_DM_RTU:
+ pszFenceType = "RTU";
+ break;
+
+ case RGXFWIF_DM_SHG:
+ pszFenceType = "SHG";
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+#endif
+
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_FENCE,
+#else
+ CACHEOP_PRINTF_FENCE,
+#endif
+ pszCacheOpType,
+ pszFenceType,
+ "",
+#if defined(PVR_RI_DEBUG)
+ "",
+#endif
+ "",
+ "",
+ ui64ExecuteTime,
+ psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+ }
+ else if (psProcessStats->asCacheOp[i32ReadIdx].bHasTimeline)
+ {
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF_FENCE,
+#else
+ CACHEOP_PRINTF_FENCE,
+#endif
+ "Timeline",
+ "",
+ "",
+#if defined(PVR_RI_DEBUG)
+ "",
+#endif
+ "",
+ "",
+ ui64ExecuteTime,
+ psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+ }
+ else
+ {
+ if (psProcessStats->asCacheOp[i32ReadIdx].bRangeBasedFlush)
+ {
+ IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+ ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift();
+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pszFlushType = "RBF.Fast";
+ }
+ else
+ {
+ pszFlushType = "RBF.Slow";
+ }
+ }
+ else
+ {
+ pszFlushType = "GF";
+ }
+
+ if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush)
+ {
+ pszFlushMode = "UM";
+ }
+ else
+ {
+ pszFlushMode = "KM";
+ }
+
+ switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_NONE:
+ pszCacheOpType = "None";
+ break;
+ case PVRSRV_CACHE_OP_CLEAN:
+ pszCacheOpType = "Clean";
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ pszCacheOpType = "Invalidate";
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ pszCacheOpType = "Flush";
+ break;
+ default:
+ pszCacheOpType = "Unknown";
+ break;
+ }
+
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG)
+ CACHEOP_RI_PRINTF,
+#else
+ CACHEOP_PRINTF,
+#endif
+ pszCacheOpType,
+ pszFlushType,
+ pszFlushMode,
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr,
+#endif
+ psProcessStats->asCacheOp[i32ReadIdx].uiOffset,
+ psProcessStats->asCacheOp[i32ReadIdx].uiSize,
+ ui64ExecuteTime,
+ psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+ }
+ }
+} /* CacheOpStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+/*************************************************************************/ /*!
+@Function MemStatsPrintElements
+@Description Prints all elements for the memory statistic record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_MEMORY_STATS* psMemoryStats = (PVRSRV_MEMORY_STATS*) pvStatPtr;
+ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+ PVRSRV_MEM_ALLOC_REC *psRecord;
+ IMG_UINT32 ui32ItemNumber;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_MEMORY)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_MEMORY);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* Write the header... */
+ pfnOSStatsPrintf(pvFile, "Type VAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, " ");
+ }
+
+ pfnOSStatsPrintf(pvFile, " PAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, " ");
+ }
+
+ pfnOSStatsPrintf(pvFile, " Size(bytes)\n");
+
+ /* The lock has to be held whilst moving through the memory list... */
+ OSLockAcquire(g_psLinkedListLock);
+ psRecord = psMemoryStats->psMemoryRecords;
+
+ while (psRecord != NULL)
+ {
+ IMG_BOOL bPrintStat = IMG_TRUE;
+
+ switch (psRecord->eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: pfnOSStatsPrintf(pvFile, "KMALLOC "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: pfnOSStatsPrintf(pvFile, "VMALLOC "); break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ bPrintStat = IMG_FALSE; break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_LMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_UMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: pfnOSStatsPrintf(pvFile, "IOREMAP_PT_LMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: pfnOSStatsPrintf(pvFile, "VMAP_PT_UMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: pfnOSStatsPrintf(pvFile, "ALLOC_LMA_PAGES "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: pfnOSStatsPrintf(pvFile, "ALLOC_UMA_PAGES "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: pfnOSStatsPrintf(pvFile, "MAP_UMA_LMA_PAGES "); break;
+ default: pfnOSStatsPrintf(pvFile, "INVALID "); break;
+ }
+
+ if (bPrintStat)
+ {
+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+ }
+ pfnOSStatsPrintf(pvFile, " ");
+
+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+ }
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ pfnOSStatsPrintf(pvFile, " %u", psRecord->uiBytes);
+
+ pfnOSStatsPrintf(pvFile, " %s", (IMG_CHAR*)psRecord->pvAllocdFromFile);
+
+ pfnOSStatsPrintf(pvFile, " %d\n", psRecord->ui32AllocdFromLine);
+#else
+ pfnOSStatsPrintf(pvFile, " %u\n", psRecord->uiBytes);
+#endif
+ }
+ /* Move to next record... */
+ psRecord = psRecord->psNext;
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* MemStatsPrintElements */
+#endif
+
+#if defined(PVR_RI_DEBUG)
+/*************************************************************************/ /*!
+@Function RIMemStatsPrintElements
+@Description Prints all elements for the RI Memory record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+RIMemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE *peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_RI_MEMORY_STATS *psRIMemoryStats = (PVRSRV_RI_MEMORY_STATS*) pvStatPtr;
+ IMG_CHAR *pszStatFmtText = NULL;
+ IMG_HANDLE *pRIHandle = NULL;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_RIMEMORY)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_RIMEMORY);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /*
+ * Loop through the RI system to get each line of text.
+ */
+ while (RIGetListEntryKM(psRIMemoryStats->pid,
+ &pRIHandle,
+ &pszStatFmtText))
+ {
+ pfnOSStatsPrintf(pvFile, "%s", pszStatFmtText);
+ }
+} /* RIMemStatsPrintElements */
+#endif
+
+static IMG_UINT32 ui32FirmwareStartTimestamp=0;
+static IMG_UINT64 ui64FirmwareIdleDuration=0;
+
+void SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+ ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
+}
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+ ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
+}
+
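+/*
+ * Power timing statistics are kept in aui32PowerTimingStats, indexed by
+ * GET_POWER_STAT_INDEX(forced/not-forced, on/off, pre/post, device/system).
+ * This helper prints the four pre/post device/system counters for one
+ * (forced, on/off) combination.
+ */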
+static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+ void *pvFile,
+ OS_STATS_PRINTF_FUNC *pfnPrintf,
+ PVRSRV_POWER_STAT_TYPE eForced,
+ PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+ IMG_UINT32 ui32Index;
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+ pfnPrintf(pvFile, " Pre-Device: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+ pfnPrintf(pvFile, " Pre-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+ pfnPrintf(pvFile, " Post-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+ pfnPrintf(pvFile, " Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+void PowerStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
+ IMG_UINT32 ui32Idx;
+
+ PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ pfnOSStatsPrintf(pvFile, "Forced Power-on Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_ON);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Forced Power-off Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_OFF);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Not Forced Power-on Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_ON);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Not Forced Power-off Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_OFF);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+
+ pfnOSStatsPrintf(pvFile, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+ pfnOSStatsPrintf(pvFile, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+ pfnOSStatsPrintf(pvFile, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+ for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx != ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+ {
+ pfnOSStatsPrintf(pvFile, "%12llu\t%11llu\t%9llu\n", asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+ asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+ asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+ }
+} /* PowerStatsPrintElements */
+
+void GlobalStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf)
+{
+ PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+ if (pfnOSGetStatsPrintf != NULL)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMalloc %10d\n", gsGlobalStats.ui32MemoryUsageKMalloc);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMallocMax %10d\n", gsGlobalStats.ui32MemoryUsageKMallocMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMalloc %10d\n", gsGlobalStats.ui32MemoryUsageVMalloc);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMallocMax %10d\n", gsGlobalStats.ui32MemoryUsageVMallocMax);
+#endif
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMA %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMAMax %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMA %10d\n", gsGlobalStats.ui32MemoryUsageVMapPTUMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMAMax %10d\n", gsGlobalStats.ui32MemoryUsageVMapPTUMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMA %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMAMax %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMA %10d\n", gsGlobalStats.ui32MemoryUsageIORemapPTLMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMAMax %10d\n", gsGlobalStats.ui32MemoryUsageIORemapPTLMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMA %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMAMax %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemLMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMA %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAMax %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPool %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPoolMax %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPoolMax);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMA %10d\n", gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA);
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMAMax);
+ }
+}
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars(IMG_CHAR *psStr)
+{
+ IMG_INT cc;
+
+ /* Replace any '/' chars that may be in the ProcName (kernel thread names can contain these) */
+ for (cc = 0; cc < 30; cc++)
+ {
+ if (*psStr == '/')
+ {
+ *psStr = '-';
+ }
+ psStr++;
+ }
+}
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating and reading proc filesystem entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PROCESS_STATS_H__
+#define __PROCESS_STATS_H__
+
+#include <powervr/mem_types.h>
+
+#include "pvrsrv_error.h"
+#include "cache_ops.h"
+
+/*
+ * The publishing of Process Stats is controlled by the
+ * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory
+ * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ *
+ * Note: There will be a performance degradation with memory allocation
+ * recording enabled!
+ */
+
+
+/*
+ * Memory types which can be tracked...
+ */
+typedef enum {
+ PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */
+ PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */
+ PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */
+ PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */
+ PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */
+ PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */
+
+ /* Must be the last enum...*/
+ PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR PVRSRVStatsInitialise(void);
+
+void PVRSRVStatsDestroy(void);
+
+PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+
+#define MAX_POWER_STAT_ENTRIES 51
+
+/*
+ * Functions for recording the statistics...
+ */
+void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+void _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#endif
+void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 ui64Key);
+
+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes);
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_UINT64 uiCpuVAddr);
+
+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes);
+
+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+ IMG_PID decrPID);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value from the
+ * hash table with uiCpuVAddr as key. Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 uiCpuVAddr);
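+
+/*
+ * Illustrative pairing (hypothetical caller, shown for documentation only;
+ * not taken from this patch): track a vmalloc allocation by its CPU virtual
+ * address and untrack it again on free.
+ *
+ *     pvAddr = vmalloc(uiBytes);
+ *     PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                         uiBytes,
+ *                                         (IMG_UINT64)(uintptr_t)pvAddr);
+ *     ...
+ *     PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                           (IMG_UINT64)(uintptr_t)pvAddr);
+ *     vfree(pvAddr);
+ */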
+
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
+
+void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+ IMG_UINT32 ui32TotalNumOutOfMemory,
+ IMG_UINT32 ui32TotalTAStores,
+ IMG_UINT32 ui32Total3DStores,
+ IMG_UINT32 ui32TotalSHStores,
+ IMG_UINT32 ui32TotalCDMStores,
+ IMG_PID owner);
+
+void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+ IMG_UINT32 ui32NumReqByFW,
+ IMG_PID owner);
+
+void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+ IMG_UINT32 ui32NumGrowReqByFW,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32NumHighPages,
+ IMG_PID ownerPid);
+
+void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+ IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 eFenceOpType,
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64ExecuteTimeMs,
+ IMG_BOOL bRangeBasedFlush,
+ IMG_BOOL bUserModeFlush,
+ IMG_BOOL bIsTimeline,
+ IMG_BOOL bIsFence,
+ IMG_PID ownerPid);
+
+/* Update pre/post power transition timing statistics */
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+
+void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+#endif /* __PROCESS_STATS_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PVR Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the PVR Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_H__
+#define __PVR_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+# if defined(SUPPORT_DCPLAT_BRIDGE)
+# include "common_dcplat_bridge.h"
+# endif
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+#include "common_mmextmem_bridge.h"
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#include "common_cmm_bridge.h"
+#endif
+#if defined(LINUX)
+#include "common_dmabuf_bridge.h"
+#endif
+#if defined(PDUMP)
+#include "common_pdump_bridge.h"
+#include "common_pdumpctrl_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#endif
+#include "common_cache_bridge.h"
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+#include "common_syncexport_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_syncsexport_bridge.h"
+#endif
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#include "common_htbuffer_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVR_RI_DEBUG)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#include "common_devicememhistory_bridge.h"
+#endif
+
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#include "common_synctracking_bridge.h"
+#endif
+
+/*
+ * Bridge Cmd Ids
+ */
+
+
+/* Note: The pattern
+ * #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ * #if defined(SUPPORT_FEATURE)
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ * #else
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ * #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled.
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
+
+#define PVRSRV_BRIDGE_FIRST 0UL
+
+/* 0: Default handler */
+#define PVRSRV_BRIDGE_DEFAULT 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST)
+/* 1: CORE functions */
+#define PVRSRV_BRIDGE_SRVCORE 1UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1)
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/* 2: SYNC functions */
+#define PVRSRV_BRIDGE_SYNC 2UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/* 3: SYNCEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCEXPORT 3UL
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/* 4: SYNCSEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCSEXPORT 4UL
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST)
+#endif
+
+/* 5: PDUMP CTRL layer functions*/
+#define PVRSRV_BRIDGE_PDUMPCTRL 5UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST)
+#endif
+
+/* 6: Memory Management functions */
+#define PVRSRV_BRIDGE_MM 6UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/* 7: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT 7UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/* 8: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM 8UL
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST)
+#endif
+
+/* 9: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM 9UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST)
+#endif
+
+/* 10: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP 10UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST)
+#endif
+
+/* 11: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF 11UL
+#if defined(LINUX)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/* 12: Display Class functions */
+#define PVRSRV_BRIDGE_DC 12UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/* 13: Cache interface functions */
+#define PVRSRV_BRIDGE_CACHE 13UL
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST)
+
+/* 14: Secure Memory Management functions*/
+#define PVRSRV_BRIDGE_SMM 14UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST)
+#endif
+
+/* 15: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL 15UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/* 16: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI 16UL
+#if defined(PVR_RI_DEBUG)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/* 17: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION 17UL
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/* 18: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS 18UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/* 19: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)
+#endif
+
+/* 20: Host Trace Buffer interface functions */
+#define PVRSRV_BRIDGE_HTBUFFER 20UL
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+#endif
+
+/* 21: Non-Linux Display functions */
+#define PVRSRV_BRIDGE_DCPLAT 21UL
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
+#endif
+
+/* 22: Extmem functions */
+#define PVRSRV_BRIDGE_MMEXTMEM 22UL
+#if defined(SUPPORT_WRAP_EXTMEM)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
+#endif
+
+/* 23: Sync tracking functions */
+#define PVRSRV_BRIDGE_SYNCTRACKING 23UL
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */
+#define PVRSRV_BRIDGE_LAST (PVRSRV_BRIDGE_SYNCTRACKING)
+/* NB PVRSRV_BRIDGE_DISPATCH_LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
+
+/* bit mask representing the enabled PVR bridges */
+
+static const IMG_UINT32 gui32PVRBridges =
+ (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+ | (1U << (PVRSRV_BRIDGE_SYNCEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+ | (1U << (PVRSRV_BRIDGE_SYNCSEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_CMM)
+ | (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(LINUX)
+ | (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+ | (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_SECURE_EXPORT)
+ | (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
+#if defined(PVR_RI_DEBUG)
+ | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_VALIDATION)
+ | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PVR_TESTING_UTILS)
+ | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_HTBUFFER)
+ | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+ | (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
+#endif
+ ;
+
+/* bit field representing which PVR bridge groups may optionally not
+ * be present in the server
+ */
+#define PVR_BRIDGES_OPTIONAL \
+ ( \
+ (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) | \
+ (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) | \
+ (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) \
+ )
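+
+/*
+ * Sketch of how these masks could be compared (hypothetical, for
+ * illustration only; the actual use of these masks is made elsewhere in the
+ * services code, not in this header). A client and server would agree if
+ * they match on all non-optional bridge groups:
+ *
+ *     if ((ui32ClientBridges & ~PVR_BRIDGES_OPTIONAL) !=
+ *         (ui32ServerBridges & ~PVR_BRIDGES_OPTIONAL))
+ *     {
+ *         reject the connection;
+ *     }
+ */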
+
+/******************************************************************************
+ * Generic bridge structures
+ *****************************************************************************/
+
+
+/******************************************************************************
+ * bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+ IMG_UINT32 ui32BridgeID; /*!< ioctl bridge group */
+ IMG_UINT32 ui32FunctionID; /*!< ioctl function index */
+ IMG_UINT32 ui32Size; /*!< size of structure */
+ void *pvParamIn; /*!< input data buffer */
+ IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */
+ void *pvParamOut; /*!< output data buffer */
+ IMG_UINT32 ui32OutBufferSize; /*!< size of output data buffer */
+} PVRSRV_BRIDGE_PACKAGE;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PVR Bridge Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Receives calls from the user portion of services and
+ despatches them to functions in the kernel portion.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/mm_types.h>
+
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pmr.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+#include "env_connection.h"
+#include <linux/sched.h>
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+
+#if defined(SUPPORT_DRM_EXT)
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (ptr)
+#else
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (void *)(uintptr_t)(ptr)
+#endif
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+/* WARNING!
+ * The mmap code has its own mutex, to prevent a possible deadlock
+ * when using gPVRSRVLock.
+ * The Linux kernel takes mm->mmap_sem before calling the mmap
+ * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
+ * entry point may take mm->mmap_sem during fault handling, or
+ * before calling get_user_pages. If gPVRSRVLock were used in the
+ * mmap entry points, a deadlock could result, due to the ioctl
+ * and mmap code taking the two locks in different orders.
+ * As a corollary to this, the mmap entry points must not call
+ * any driver code that relies on gPVRSRVLock being held.
+ */
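+/*
+ * Illustrative AB-BA scenario described above (sketch only, not code from
+ * this patch):
+ *
+ *     ioctl path:  gPVRSRVLock  ->  mm->mmap_sem   (fault handling or
+ *                                                   get_user_pages)
+ *     mmap path:   mm->mmap_sem ->  gPVRSRVLock    (if it were taken there)
+ *
+ * Acquiring the two locks in opposite orders on the two paths is the
+ * classic AB-BA deadlock, hence the dedicated g_sMMapMutex below.
+ */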
+static DEFINE_MUTEX(g_sMMapMutex);
+
+#if defined(DEBUG_BRIDGE_KM)
+static PVR_DEBUGFS_ENTRY_DATA *gpsPVRDebugFSBridgeStatsEntry = NULL;
+static struct seq_operations gsBridgeStatsReadOps;
+#endif
+
+/* These will go when full bridge gen comes in */
+#if defined(PDUMP)
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+PVRSRV_ERROR DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+#endif
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR InitSYNCEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCEXPORTBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSYNCSEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(void);
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined (SUPPORT_RGX)
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+PVRSRV_ERROR InitRGXINITBridge(void);
+PVRSRV_ERROR DeinitRGXINITBridge(void);
+#endif
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+#endif
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+#endif
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+#endif /* SUPPORT_RGX */
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+PVRSRV_ERROR DeinitSMMBridge(void);
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+#endif
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+PVRSRV_ERROR DeinitTUTILSBridge(void);
+#endif
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+PVRSRV_ERROR InitMMEXTMEMBridge(void);
+PVRSRV_ERROR DeinitMMEXTMEMBridge(void);
+#endif
+
+PVRSRV_ERROR
+DeviceDepBridgeInit(IMG_UINT64 ui64Features)
+{
+ PVRSRV_ERROR eError;
+
+ if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ eError = InitRGXCMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+
+ if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+ {
+ eError = InitRGXSIGNALSBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ eError = InitRGXRAYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ {
+ eError = InitRGXTQ2Bridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DeviceDepBridgeDeInit(IMG_UINT64 ui64Features)
+{
+ PVRSRV_ERROR eError;
+
+ if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ eError = DeinitRGXCMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+
+ if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+ {
+ eError = DeinitRGXSIGNALSBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ eError = DeinitRGXRAYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ {
+ eError = DeinitRGXTQ2Bridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+
+PVRSRV_ERROR
+LinuxBridgeInit(void)
+{
+ PVRSRV_ERROR eError;
+#if defined(DEBUG_BRIDGE_KM)
+ IMG_INT iResult;
+
+ iResult = PVRDebugFSCreateEntry("bridge_stats",
+ NULL,
+ &gsBridgeStatsReadOps,
+ NULL,
+ NULL,
+ NULL,
+ &g_BridgeDispatchTable[0],
+ &gpsPVRDebugFSBridgeStatsEntry);
+ if (iResult != 0)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+#endif
+
+ BridgeDispatchTableStartOffsetsInit();
+
+ eError = InitSRVCOREBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = InitSYNCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+ eError = InitSYNCEXPORTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = InitSYNCSEXPORTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+ eError = InitPDUMPCTRLBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#if !defined(EXCLUDE_CMM_BRIDGE)
+ eError = InitCMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(PDUMP)
+ eError = InitPDUMPMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ eError = InitPDUMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitDMABUFBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = InitDCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitCACHEBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = InitSMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+ eError = InitHTBUFFERBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitPVRTLBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ #if defined(PVR_RI_DEBUG)
+ eError = InitRIBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ #endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+ eError = InitVALIDATIONBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+ eError = InitTUTILSBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = InitDEVICEMEMHISTORYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = InitSYNCTRACKINGBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ #if defined (SUPPORT_RGX)
+
+ eError = InitRGXTQBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ eError = InitRGXINITBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitRGXTA3DBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+ eError = InitBREAKPOINTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitDEBUGMISCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(PDUMP)
+ eError = InitRGXPDUMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitRGXHWPERFBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+ eError = InitREGCONFIGBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = InitTIMERQUERYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = InitRGXKICKSYNCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#endif /* SUPPORT_RGX */
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+ eError = InitMMEXTMEMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR
+LinuxBridgeDeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+ eError = DeinitMMEXTMEMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+ if (gpsPVRDebugFSBridgeStatsEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsPVRDebugFSBridgeStatsEntry);
+ }
+#endif
+
+ eError = DeinitSRVCOREBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = DeinitSYNCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+ eError = DeinitSYNCEXPORTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = DeinitSYNCSEXPORTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+ eError = DeinitPDUMPCTRLBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#if !defined(EXCLUDE_CMM_BRIDGE)
+ eError = DeinitCMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(PDUMP)
+ eError = DeinitPDUMPMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ eError = DeinitPDUMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitDMABUFBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(PVR_TESTING_UTILS)
+ eError = DeinitTUTILSBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DeinitDCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitCACHEBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = DeinitSMMBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+ eError = DeinitHTBUFFERBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitPVRTLBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ #if defined(PVR_RI_DEBUG)
+ eError = DeinitRIBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ #endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = DeinitDEVICEMEMHISTORYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = DeinitSYNCTRACKINGBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ #if defined (SUPPORT_RGX)
+
+ eError = DeinitRGXTQBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ eError = DeinitRGXINITBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitRGXTA3DBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+ eError = DeinitBREAKPOINTBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitDEBUGMISCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if defined(PDUMP)
+ eError = DeinitRGXPDUMPBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitRGXHWPERFBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+ eError = DeinitREGCONFIGBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ eError = DeinitTIMERQUERYBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = DeinitRGXKICKSYNCBridge();
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#endif /* SUPPORT_RGX */
+
+ return eError;
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+static void *BridgeStatsSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+
+ OSAcquireBridgeLock();
+
+ if (psDispatchTable == NULL || (*puiPosition) > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ return NULL;
+ }
+
+ if ((*puiPosition) == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return &(psDispatchTable[(*puiPosition) - 1]);
+}
+
+static void BridgeStatsSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ OSReleaseBridgeLock();
+}
+
+static void *BridgeStatsSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+ loff_t uiItemAskedFor = *puiPosition; /* puiPosition on entry is the index to return */
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ /* Is the item asked for (starts at 0) a valid table index? */
+ if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ (*puiPosition)++; /* on exit it is the next seq index to ask for */
+ return &(psDispatchTable[uiItemAskedFor]);
+ }
+
+ /* We are now past the end of the table; return NULL to indicate stop */
+ return NULL;
+}
+
+static int BridgeStatsSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData == SEQ_START_TOKEN)
+ {
+ seq_printf(psSeqFile,
+ "Total ioctl call count = %u\n"
+ "Total number of bytes copied via copy_from_user = %u\n"
+ "Total number of bytes copied via copy_to_user = %u\n"
+ "Total number of bytes copied via copy_*_user = %u\n\n"
+ "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s \n",
+ g_BridgeGlobalStats.ui32IOCTLCount,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ "#",
+ "Bridge Name",
+ "Wrapper Function",
+ "Call Count",
+ "copy_from_user (B)",
+ "copy_to_user (B)",
+ "Total Time (us)",
+ "Max Time (us)");
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)pvData;
+ IMG_UINT32 ui32Remainder;
+
+ seq_printf(psSeqFile,
+ "%3d: %-60s %-48s %-10u %-20u %-20u %-20llu %-20llu\n",
+ (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+ psEntry->pszIOCName,
+ (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+ psEntry->ui32CallCount,
+ psEntry->ui32CopyFromUserTotalBytes,
+ psEntry->ui32CopyToUserTotalBytes,
+ (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+ (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsBridgeStatsReadOps =
+{
+ .start = BridgeStatsSeqStart,
+ .stop = BridgeStatsSeqStop,
+ .next = BridgeStatsSeqNext,
+ .show = BridgeStatsSeqShow,
+};
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
+int
+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile)
+{
+ struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg;
+ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 };
+ CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp);
+ PVRSRV_ERROR error;
+
+ if (psConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __FUNCTION__));
+ return -EFAULT;
+ }
+
+ if (OSGetDriverSuspended())
+ {
+ return -EINTR;
+ }
+
+ PVR_ASSERT(psSrvkmCmd != NULL);
+
+ DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d",
+ task_tgid_nr(current),
+ ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner,
+ psSrvkmCmd->bridge_id,
+ psSrvkmCmd->bridge_func_id);
+
+ sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id;
+ sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id;
+ sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM);
+ sBridgePackageKM.pvParamIn = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->in_data_ptr);
+ sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size;
+ sBridgePackageKM.pvParamOut = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->out_data_ptr);
+ sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size;
+
+ error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+ return OSPVRSRVToNativeError(error);
+}
+
+int
+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+ CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+ IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
+ PMR *psPMR;
+ PVRSRV_ERROR eError;
+
+ if (psConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+ return -ENOENT;
+ }
+
+ /*
+ * The bridge lock that used to protect PVRSRVLookupHandle here has been
+ * replaced by a dedicated lock, since the handle functions now have
+ * their own lock. This change was necessary to resolve the lockdep
+ * issues related to PVRSRV_MMap.
+ */
+ mutex_lock(&g_sMMapMutex);
+
+ eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+ (void **)&psPMR,
+ hSecurePMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Note: PMRMMapPMR will take a reference on the PMR.
+ * Unref the handle immediately, because we have now done
+ * the required operation on the PMR (whether it succeeded or not)
+ */
+ eError = PMRMMapPMR(psPMR, ps_vma);
+ PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ mutex_unlock(&g_sMMapMutex);
+
+ return 0;
+
+e1:
+ PMRUnrefPMR(psPMR);
+ goto em1;
+e0:
+ PVR_DPF((PVR_DBG_ERROR, "Error in mmap critical section"));
+em1:
+ mutex_unlock(&g_sMMapMutex);
+
+ PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return -ENOENT; /* no better native error is available here; -EAGAIN might also be appropriate */
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Debug Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides kernel side Debug Functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <stdarg.h>
+
+#include "allocmem.h"
+#include "pvrversion.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pvr_debugfs.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "rgxinit.h"
+#include "lists.h"
+#include "osfunc.h"
+
+/* Handle used by DebugFS to get GPU utilisation stats */
+static IMG_HANDLE ghGpuUtilUserDebugFS = NULL;
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means
+ * a fixed number of messages can be stored, regardless of individual
+ * message length.
+ */
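+
+/*
+ * Sizing sketch (hypothetical values, for illustration only): with, say,
+ * PVRSRV_DEBUG_CCB_MAX == 64 and PVRSRV_DEBUG_CCB_MESG_MAX == 512, the
+ * message storage alone is 64 * 512 = 32 KiB of static data, plus the
+ * per-entry file/line/TID/PID/timestamp fields.
+ */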
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN
+
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+typedef struct
+{
+ const IMG_CHAR *pszFile;
+ IMG_INT iLine;
+ IMG_UINT32 ui32TID;
+ IMG_UINT32 ui32PID;
+ IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+ struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX] = { { 0 } };
+
+static IMG_UINT giOffset = 0;
+
+static DEFINE_MUTEX(gsDebugCCBMutex);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+ const IMG_CHAR *szBuffer)
+{
+ mutex_lock(&gsDebugCCBMutex);
+
+ gsDebugCCB[giOffset].pszFile = pszFileName;
+ gsDebugCCB[giOffset].iLine = ui32Line;
+ gsDebugCCB[giOffset].ui32TID = current->pid;
+ gsDebugCCB[giOffset].ui32PID = current->tgid;
+
+ do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+ strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
+ gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;
+
+ giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+ mutex_unlock(&gsDebugCCBMutex);
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+ int i;
+
+ mutex_lock(&gsDebugCCBMutex);
+
+ for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+ {
+ PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+ &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+ /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+ if (!psDebugCCBEntry->pszFile)
+ {
+ continue;
+ }
+
+ printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n",
+ psDebugCCBEntry->pszFile,
+ psDebugCCBEntry->iLine,
+ (long)psDebugCCBEntry->sTimeVal.tv_sec,
+ (long)psDebugCCBEntry->sTimeVal.tv_usec,
+ psDebugCCBEntry->ui32TID,
+ psDebugCCBEntry->ui32PID,
+ psDebugCCBEntry->pcMesg);
+
+ /* Clear this entry so it doesn't get printed again next time. */
+ psDebugCCBEntry->pszFile = NULL;
+ }
+
+ mutex_unlock(&gsDebugCCBMutex);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+ const IMG_CHAR *szBuffer)
+{
+ (void)pszFileName;
+ (void)szBuffer;
+ (void)ui32Line;
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+ /* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR *pszFormat, va_list VArgs)
+ __printf(3, 0);
+
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR *pszFormat, ...)
+ __printf(3, 4);
+
+/* NOTE: Must NOT be static! Used in module.c. */
+IMG_UINT32 gPVRDebugLevel =
+ (
+ DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+ | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+ | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+ );
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Message buffer for non-IRQ messages */
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* Message buffer for IRQ messages */
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBufferNonIRQ */
+static DEFINE_MUTEX(gsDebugMutexNonIRQ);
+
+/* The lock is used to control access to gszBufferIRQ */
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+
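+/* Select the IRQ-safe, spinlock-protected buffer when we cannot sleep
+ * (in interrupt context or with preemption disabled); otherwise use the
+ * mutex-protected buffer, where the formatting path is allowed to sleep.
+ */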
+#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
+
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+ if (USE_SPIN_LOCK)
+ {
+ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+ }
+ else
+ {
+ mutex_lock(&gsDebugMutexNonIRQ);
+ }
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+ if (USE_SPIN_LOCK)
+ {
+ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+ }
+ else
+ {
+ mutex_unlock(&gsDebugMutexNonIRQ);
+ }
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+ if (USE_SPIN_LOCK)
+ {
+ *ppszBuf = gszBufferIRQ;
+ *pui32BufSiz = sizeof(gszBufferIRQ);
+ }
+ else
+ {
+ *ppszBuf = gszBufferNonIRQ;
+ *pui32BufSiz = sizeof(gszBufferNonIRQ);
+ }
+}
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The format arguments are supplied via a va_list. Returns IMG_TRUE
+ * if the output was truncated to fit the buffer.
+ */
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+ IMG_UINT32 ui32Used;
+ IMG_UINT32 ui32Space;
+ IMG_INT32 i32Len;
+
+ ui32Used = strlen(pszBuf);
+ BUG_ON(ui32Used >= ui32BufSiz);
+ ui32Space = ui32BufSiz - ui32Used;
+
+ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+ pszBuf[ui32BufSiz - 1] = 0;
+
+ /* Return true if string was truncated */
+ return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVReleasePrintf
+@Description To output an important message to the user in release builds
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+ IMG_INT32 result;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid);
+ PVR_ASSERT(result > 0);
+ ui32BufSiz -= result;
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+ va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function PVRSRVTrace
+@Description To output a debug message to the user
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+ va_list VArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+ IMG_INT32 result;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(VArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid);
+ PVR_ASSERT(result > 0);
+ ui32BufSiz -= result;
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+ va_list VArgs;
+ IMG_BOOL bTrunc;
+
+ va_start (VArgs, pszFormat);
+
+ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+ va_end (VArgs);
+
+ return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintf
+@Description To output a debug message to the user
+@Input ui32DebugLevel The debug level of the message
+@Input pszFullFileName The source file generating the message
+@Input ui32Line The line of the source file
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR *pszFullFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszFormat,
+ ...)
+{
+ IMG_BOOL bNoLoc;
+ const IMG_CHAR *pszFileName = pszFullFileName;
+ IMG_CHAR *pszLeafName;
+
+ bNoLoc = (IMG_BOOL)((ui32DebugLevel & DBGPRIV_CALLTRACE) |
+ (ui32DebugLevel & DBGPRIV_BUFFERED)) ? IMG_TRUE : IMG_FALSE;
+
+ if (gPVRDebugLevel & ui32DebugLevel)
+ {
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ switch (ui32DebugLevel)
+ {
+ case DBGPRIV_FATAL:
+ {
+ strncpy(pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_ERROR:
+ {
+ strncpy(pszBuf, "PVR_K:(Error): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_WARNING:
+ {
+ strncpy(pszBuf, "PVR_K:(Warn): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_MESSAGE:
+ {
+ strncpy(pszBuf, "PVR_K:(Mesg): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_VERBOSE:
+ {
+ strncpy(pszBuf, "PVR_K:(Verb): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_DEBUG:
+ {
+ strncpy(pszBuf, "PVR_K:(Debug): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_CALLTRACE:
+ case DBGPRIV_ALLOC:
+ case DBGPRIV_BUFFERED:
+ default:
+ {
+ strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 2));
+ break;
+ }
+ }
+ pszBuf[ui32BufSiz - 1] = '\0';
+
+ if (current->pid == task_tgid_nr(current))
+ {
+ (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid);
+ }
+ else
+ {
+ (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */);
+ }
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ IMG_BOOL bTruncated = IMG_FALSE;
+
+#if !defined(__sh__)
+ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/');
+
+ if (pszLeafName)
+ {
+ pszFileName = pszLeafName+1;
+ }
+#endif /* __sh__ */
+
+#if defined(DEBUG)
+ {
+ static const IMG_CHAR *lastFile = NULL;
+
+ if (lastFile == pszFileName)
+ {
+ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+ }
+ else
+ {
+ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line);
+ lastFile = pszFileName;
+ }
+ }
+#endif
+
+ if (bTruncated)
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ if (ui32DebugLevel & DBGPRIV_BUFFERED)
+ {
+ AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+ }
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end (vaArgs);
+ }
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+ va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugVersionSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugVersionCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugVersionSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugVersionSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugVersionCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static int _DebugVersionSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (pvData == SEQ_START_TOKEN)
+ {
+ if(psPVRSRVData->sDriverInfo.bIsNoMatch)
+ {
+ seq_printf(psSeqFile, "Driver UM Version: %d (%s) %s\n",
+ psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision,
+ (psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
+ PVR_BUILD_DIR);
+ seq_printf(psSeqFile, "Driver KM Version: %d (%s) %s\n",
+ psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision,
+ (BUILD_TYPE_RELEASE == psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug",
+ PVR_BUILD_DIR);
+ }else
+ {
+ seq_printf(psSeqFile, "Driver Version: %s (%s) %s\n",
+ PVRVERSION_STRING,
+ PVR_BUILD_TYPE, PVR_BUILD_DIR);
+ }
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ seq_printf(psSeqFile, "\nDevice Name: %s\n", psDevNode->psDevConfig->pszName);
+
+ if (psDevNode->psDevConfig->pszVersion)
+ {
+ seq_printf(psSeqFile, "Device Version: %s\n", psDevNode->psDevConfig->pszVersion);
+ }
+
+ if (psDevNode->pfnDeviceVersionString)
+ {
+ IMG_CHAR *pszDeviceVersionString;
+
+ if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK)
+ {
+ seq_printf(psSeqFile, "%s\n", pszDeviceVersionString);
+
+ OSFreeMem(pszDeviceVersionString);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsDebugVersionReadOps =
+{
+ .start = _DebugVersionSeqStart,
+ .stop = _DebugVersionSeqStop,
+ .next = _DebugVersionSeqNext,
+ .show = _DebugVersionSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+ va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugStatusCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugStatusSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugStatusCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static int _DebugStatusSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData == SEQ_START_TOKEN)
+ {
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+ if (psPVRSRVData != NULL)
+ {
+ switch (psPVRSRVData->eServicesState)
+ {
+ case PVRSRV_SERVICES_STATE_OK:
+ seq_printf(psSeqFile, "Driver Status: OK\n");
+ break;
+ case PVRSRV_SERVICES_STATE_BAD:
+ seq_printf(psSeqFile, "Driver Status: BAD\n");
+ break;
+ default:
+ seq_printf(psSeqFile, "Driver Status: %d\n", psPVRSRVData->eServicesState);
+ break;
+ }
+ }
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ IMG_CHAR *pszStatus = "";
+ IMG_CHAR *pszReason = "";
+ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+ PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+ /* Update the health status now if possible... */
+ if (psDeviceNode->pfnUpdateHealthStatus)
+ {
+ psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+ }
+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+ eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason);
+
+ switch (eHealthStatus)
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break;
+ default: pszStatus = "UNKNOWN"; break;
+ }
+
+ switch (eHealthReason)
+ {
+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (FW Assert)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failure)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break;
+ default: pszReason = " (Unknown reason)"; break;
+ }
+
+ seq_printf(psSeqFile, "Firmware Status: %s%s\n", pszStatus, pszReason);
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * Guest drivers do not support the following functionality:
+ * - Perform actual on-chip fw tracing
+ * - Collect actual on-chip GPU utilization stats
+ * - Perform actual on-chip GPU power/dvfs management
+ */
+ PVR_UNREFERENCED_PARAMETER(ghGpuUtilUserDebugFS);
+#else
+ /* Write other useful stats to aid the test cycle... */
+ if (psDeviceNode->pvDevice != NULL)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Calculate the number of HWR events in total across all the DMs... */
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_UINT32 ui32HWREventCount = 0;
+ IMG_UINT32 ui32CRREventCount = 0;
+ IMG_UINT32 ui32DMIndex;
+
+ for (ui32DMIndex = 0; ui32DMIndex < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; ui32DMIndex++)
+ {
+ ui32HWREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[ui32DMIndex];
+ ui32CRREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[ui32DMIndex];
+ }
+
+ seq_printf(psSeqFile, "HWR Event Count: %d\n", ui32HWREventCount);
+ seq_printf(psSeqFile, "CRR Event Count: %d\n", ui32CRREventCount);
+ }
+
+ /* Write the number of APM events... */
+ seq_printf(psSeqFile, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal);
+
+ /* Write the current GPU Utilisation values... */
+ if (psDevInfo->pfnGetGpuUtilStats &&
+ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+ {
+ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+ ghGpuUtilUserDebugFS,
+ &sGpuUtilStats);
+
+ if ((eError == PVRSRV_OK) &&
+ ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+ {
+ IMG_UINT64 util;
+ IMG_UINT32 rem;
+
+ util = 100 * (sGpuUtilStats.ui64GpuStatActiveHigh +
+ sGpuUtilStats.ui64GpuStatActiveLow);
+ util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+ seq_printf(psSeqFile, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+ }
+ else
+ {
+ seq_printf(psSeqFile, "GPU Utilisation: -\n");
+ }
+ }
+ }
+#endif
+ }
+
+ return 0;
+}
+
+static IMG_INT DebugStatusSet(const char __user *pcBuffer,
+ size_t uiCount,
+ loff_t uiPosition,
+ void *pvData)
+{
+ IMG_CHAR acDataBuffer[6];
+
+ if (uiPosition != 0)
+ {
+ return -EIO;
+ }
+
+ if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+ {
+ return -EINVAL;
+ }
+
+ if (acDataBuffer[uiCount - 1] != '\n')
+ {
+ return -EINVAL;
+ }
+
+ if ((acDataBuffer[0] == 'k' || acDataBuffer[0] == 'K') && uiCount == 2)
+ {
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+ }
+ else
+ {
+ return -EINVAL;
+ }
+
+ return uiCount;
+}
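+/* Writing "k" (or "K") to the status entry marks the services state as BAD;
+ * tests can use this to simulate a driver failure. Illustrative use from
+ * user space (the debugfs mount point and directory name may vary):
+ *
+ * echo k > /sys/kernel/debug/pvr/status
+ */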
+
+static struct seq_operations gsDebugStatusReadOps =
+{
+ .start = _DebugStatusSeqStart,
+ .stop = _DebugStatusSeqStop,
+ .next = _DebugStatusSeqNext,
+ .show = _DebugStatusSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugDumpDebugSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugDumpDebugCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugDumpDebugSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugDumpDebugSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugDumpDebugCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DumpDebugSeqPrintf(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ va_end(ArgList);
+ seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugDumpDebugSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL && pvData != SEQ_START_TOKEN)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ if (psDeviceNode->pvDevice != NULL)
+ {
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+ _DumpDebugSeqPrintf, psSeqFile);
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsDumpDebugReadOps =
+{
+ .start = _DebugDumpDebugSeqStart,
+ .stop = _DebugDumpDebugSeqStop,
+ .next = _DebugDumpDebugSeqNext,
+ .show = _DebugDumpDebugSeqShow,
+};
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugFWTraceSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugFWTraceCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugFWTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugFWTraceSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugFWTraceCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _FWTraceSeqPrintf(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ va_end(ArgList);
+ seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugFWTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL && pvData != SEQ_START_TOKEN)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ if (psDeviceNode->pvDevice != NULL)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ RGXDumpFirmwareTrace(_FWTraceSeqPrintf, psSeqFile, psDevInfo);
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsFWTraceReadOps =
+{
+ .start = _DebugFWTraceSeqStart,
+ .stop = _DebugFWTraceSeqStop,
+ .next = _DebugFWTraceSeqNext,
+ .show = _DebugFWTraceSeqShow,
+};
+#endif
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static void *DebugLevelSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ if (*puiPosition == 0)
+ {
+ return psSeqFile->private;
+ }
+
+ return NULL;
+}
+
+static void DebugLevelSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *DebugLevelSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+ return NULL;
+}
+
+static int DebugLevelSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL)
+ {
+ IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData);
+
+ seq_printf(psSeqFile, "%u\n", uiDebugLevel);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct seq_operations gsDebugLevelReadOps =
+{
+ .start = DebugLevelSeqStart,
+ .stop = DebugLevelSeqStop,
+ .next = DebugLevelSeqNext,
+ .show = DebugLevelSeqShow,
+};
+
+
+static IMG_INT DebugLevelSet(const char __user *pcBuffer,
+ size_t uiCount,
+ loff_t uiPosition,
+ void *pvData)
+{
+ IMG_UINT32 *uiDebugLevel = (IMG_UINT32 *)pvData;
+ IMG_CHAR acDataBuffer[6];
+
+ if (uiPosition != 0)
+ {
+ return -EIO;
+ }
+
+ if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+ {
+ return -EINVAL;
+ }
+
+ if (acDataBuffer[uiCount - 1] != '\n')
+ {
+ return -EINVAL;
+ }
+
+ /* NUL-terminate (replacing the trailing newline) so sscanf() does not
+ * read past the end of the buffer.
+ */
+ acDataBuffer[uiCount - 1] = '\0';
+
+ if (sscanf(acDataBuffer, "%u", &gPVRDebugLevel) == 0)
+ {
+ return -EINVAL;
+ }
+
+ /* Mask off bits above DBGPRIV_LAST. __builtin_ffsl() is a GCC builtin
+ * (this file is Linux/GCC only) returning the 1-based index of the least
+ * significant set bit.
+ */
+ (*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1;
+
+ return uiCount;
+}
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
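+/* Illustrative use of the debug_level entry from user space (the debugfs
+ * mount point and directory name may vary). The value is a mask of the
+ * DBGPRIV_* bits, e.g. 7 enables FATAL, ERROR and WARNING:
+ *
+ * echo 7 > /sys/kernel/debug/pvr/debug_level
+ * cat /sys/kernel/debug/pvr/debug_level
+ */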
+
+static PVR_DEBUGFS_ENTRY_DATA *gpsVersionDebugFSEntry;
+
+static PVR_DEBUGFS_ENTRY_DATA *gpsStatusDebugFSEntry;
+static PVR_DEBUGFS_ENTRY_DATA *gpsDumpDebugDebugFSEntry;
+
+static PVR_DEBUGFS_ENTRY_DATA *gpsFWTraceDebugFSEntry;
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static PVR_DEBUGFS_ENTRY_DATA *gpsDebugLevelDebugFSEntry;
+#endif
+
+int PVRDebugCreateDebugFSEntries(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ int iResult;
+
+ PVR_ASSERT(psPVRSRVData != NULL);
+
+ /*
+ * The DebugFS entries are designed to work in a single device system but
+ * this function will be called multiple times in a multi-device system.
+ * Return an error in this case.
+ */
+ if (gpsVersionDebugFSEntry)
+ {
+ return -EEXIST;
+ }
+
+#if !defined(NO_HARDWARE)
+ if (RGXRegisterGpuUtilStats(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+#endif
+
+ iResult = PVRDebugFSCreateEntry("version",
+ NULL,
+ &gsDebugVersionReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsVersionDebugFSEntry);
+ if (iResult != 0)
+ {
+ return iResult;
+ }
+
+ iResult = PVRDebugFSCreateEntry("status",
+ NULL,
+ &gsDebugStatusReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)DebugStatusSet,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsStatusDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveVersionEntry;
+ }
+
+ iResult = PVRDebugFSCreateEntry("debug_dump",
+ NULL,
+ &gsDumpDebugReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsDumpDebugDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveStatusEntry;
+ }
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ iResult = PVRDebugFSCreateEntry("firmware_trace",
+ NULL,
+ &gsFWTraceReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsFWTraceDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveDumpDebugEntry;
+ }
+#endif
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ iResult = PVRDebugFSCreateEntry("debug_level",
+ NULL,
+ &gsDebugLevelReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)DebugLevelSet,
+ NULL,
+ NULL,
+ &gPVRDebugLevel,
+ &gpsDebugLevelDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveFWTraceLogEntry;
+ }
+#endif
+
+ return 0;
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ErrorRemoveFWTraceLogEntry:
+ PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+#endif
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ErrorRemoveDumpDebugEntry:
+ PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+#endif
+ErrorRemoveStatusEntry:
+ PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+
+ErrorRemoveVersionEntry:
+ PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+
+ return iResult;
+}
+
+void PVRDebugRemoveDebugFSEntries(void)
+{
+#if !defined(NO_HARDWARE)
+ if (ghGpuUtilUserDebugFS != NULL)
+ {
+ RGXUnregisterGpuUtilStats(ghGpuUtilUserDebugFS);
+ ghGpuUtilUserDebugFS = NULL;
+ }
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ if (gpsDebugLevelDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsDebugLevelDebugFSEntry);
+ }
+#endif
+
+ if (gpsFWTraceDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+ }
+
+ if (gpsDumpDebugDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+ }
+
+ if (gpsStatusDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+ }
+
+ if (gpsVersionDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+ }
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PVR Debug Declarations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides debug functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_DEBUG_H__
+#define __PVR_DEBUG_H__
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if defined(_MSC_VER)
+# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+# define MSC_SUPPRESS_4127
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_DBGDRV_MESSAGE 0x200UL /*!< Debug-DbgDrivMessage. Privately used by pvr_debug. */
+#define DBGPRIV_LAST 0x200UL /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if !defined(DOXYGEN)
+#if defined(__KERNEL__)
+ IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+# define PVRSRVGETERRORSTRING PVRSRVGetErrorStringKM
+#else
+/*************************************************************************/ /*
+PVRSRVGetErrorString
+Returns a string describing the provided PVRSRV_ERROR code
+NB No doxygen comments provided as this function does not require porting
+ for other operating systems
+*/ /**************************************************************************/
+ IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+# define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
+
+/* Unfortunately the Klocwork static analysis checker doesn't understand our
+ * ASSERT macros and reports lots of false positives. Defining our assert
+ * macros in a special way when the code is analysed by Klocwork avoids
+ * these. */
+#if defined(__KLOCWORK__)
+ #define PVR_ASSERT(x) do { if (!(x)) abort(); } while (0)
+#else /* !defined(__KLOCWORK__) */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do \
+ { \
+ MSC_SUPPRESS_4127 \
+ if (unlikely(!(expr))) \
+ { \
+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+ "*** Debug assertion failed!"); \
+ __debugbreak(); \
+ } \
+ MSC_SUPPRESS_4127 \
+ } while (0)
+
+#else
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use BUG() directly. This produces the correct
+ filename and line number in the panic message. */
+#define PVR_ASSERT(EXPR) do \
+ { \
+ if (unlikely(!(EXPR))) \
+ { \
+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \
+ "Debug assertion failed!"); \
+ BUG(); \
+ } \
+ } while (0)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugAssertFail
+@Description Indicate to the user that a debug assertion has failed and
+ prevent the program from continuing.
+ Invoked from the macro PVR_ASSERT().
+@Input pszFile The name of the source file where the assertion failed
+@Input ui32Line The line number of the failed assertion
+@Input pszAssertion String describing the assertion
+@Return NEVER!
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV __noreturn
+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszAssertion);
+
+#define PVR_ASSERT(EXPR) do \
+ { \
+ if (unlikely(!(EXPR))) \
+ PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \
+ } while (0)
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+#endif /* defined(_WIN32) */
+#endif /* defined(__KLOCWORK__) */
+
+#if defined(__KLOCWORK__)
+ #define PVR_DBG_BREAK do { abort(); } while (0)
+#else
+ #if defined (WIN32)
+ #define PVR_DBG_BREAK __debugbreak(); /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+ #else
+ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+ /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+ #if defined(_WIN32)
+ #define PVR_DBG_BREAK DBG_BREAK
+ #else
+ #if defined(LINUX) && defined(__KERNEL__)
+ #define PVR_DBG_BREAK BUG()
+ #else
+ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+ #endif
+ #endif
+ #else
+ /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+ #define PVR_DBG_BREAK
+ #endif
+ #endif
+#endif
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+ /* Unfortunately the Klocwork static analysis checker doesn't understand our
+ * ASSERT macros and reports lots of false positives. Defining our assert
+ * macros in a special way when the code is analysed by Klocwork avoids
+ * these. */
+ #if defined(__KLOCWORK__)
+ #define PVR_ASSERT(EXPR) do { if (unlikely(!(EXPR))) abort(); } while (0)
+ #else
+ #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+ #endif
+
+ #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
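+/* Example usage (illustrative):
+ *
+ * PVR_ASSERT(psDevInfo != NULL);
+ *
+ * Note that PVR_ASSERT() only traps when PVRSRV_NEED_PVR_ASSERT is defined;
+ * in other builds the expression is still evaluated but its result is
+ * ignored, so it must not be relied upon for error handling.
+ */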
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+
+ /* New logging mechanism */
+ #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */
+ #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */
+ #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */
+ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */
+ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */
+ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE
+ #define PVR_DBG_ALLOC DBGPRIV_ALLOC
+ #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */
+ #define PVR_DBG_DEBUG DBGPRIV_DEBUG
+ #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE
+
+ /* These levels are always on with PVRSRV_NEED_PVR_DPF */
+ #define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+ #define __PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+ #define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+ /*
+ The AdHoc-Debug level is only supported when enabled in the local
+ build environment and may need to be used in both debug and release
+ builds. An error is generated in the formal build if it is checked in.
+ */
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+ #define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+ /* Use an undefined token here to stop compilation dead in the offending module */
+ #define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+ /* Some are compiled out completely in release builds */
+#if defined(DEBUG) || defined(DOXYGEN)
+ #define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+ #define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+ #define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+ #define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+ #define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+ #define __PVR_DPF_0x200UL(...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, __VA_ARGS__)
+#else
+ #define __PVR_DPF_0x004UL(...)
+ #define __PVR_DPF_0x008UL(...)
+ #define __PVR_DPF_0x010UL(...)
+ #define __PVR_DPF_0x020UL(...)
+ #define __PVR_DPF_0x040UL(...)
+ #define __PVR_DPF_0x200UL(...)
+#endif
+
+ /* Translate the different log levels to separate macros
+ * so they can each be compiled out.
+ */
+#if defined(DEBUG)
+ #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+ #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", 0, __VA_ARGS__)
+#endif
+
+ /* Get rid of the double bracketing */
+ #define PVR_DPF(x) __PVR_DPF x
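+ /* Example usage (illustrative) - note the double brackets, which let a
+ * variable-length argument list pass through the single macro parameter:
+ *
+ * PVR_DPF((PVR_DBG_ERROR, "Failed to map buffer (%u bytes)", ui32Size));
+ *
+ * ui32Size is a hypothetical variable used only for illustration.
+ */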
+
+ #define PVR_LOG_ERROR(_rc, _call) \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+ #define PVR_LOG_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_NOMEM(_expr, _call) do \
+ { if (unlikely(_expr == NULL)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+ return (PVRSRV_ERROR_OUT_OF_MEMORY); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do \
+ { if (unlikely(_expr == NULL)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ goto _go; } \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ return (_rc); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGRN_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ return; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_ERROR(_rc, _call, _go) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ goto _go; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOG_IF_FALSE(_expr, _msg) do \
+ { if (unlikely(!(_expr))) \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do \
+ { if (unlikely(!(_expr))) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ return (_rc); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do \
+ { if (unlikely(!(_expr))) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ goto _go; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
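+ /* Example usage of the logging helpers (illustrative; SomeServicesCall,
+ * the OSAllocMem call shown and the ErrorExit label are assumptions used
+ * only for illustration):
+ *
+ * eError = SomeServicesCall(psDeviceNode);
+ * PVR_LOGR_IF_ERROR(eError, "SomeServicesCall");
+ *
+ * pvBuf = OSAllocMem(uiSize);
+ * PVR_LOGG_IF_NOMEM(pvBuf, "OSAllocMem", eError, ErrorExit);
+ */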
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintf
+@Description Output a debug message to the user, using an OS-specific
+ method, to a log or console which can be read by developers
+ Invoked from the macro PVR_DPF().
+@Input ui32DebugLevel The debug level of the message. This can
+ be used to restrict the output of debug
+ messages based on their severity.
+ If this is PVR_DBG_BUFFERED, the message
+ should be written into a debug circular
+ buffer instead of being output immediately
+ (useful when performance would otherwise
+ be adversely affected).
+ The debug circular buffer shall only be
+ output when PVRSRVDebugPrintfDumpCCB() is
+ called.
+@Input pszFileName The source file containing the code that is
+ generating the message
+@Input ui32Line The line number in the source file
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the
+ formatted string
+@Return None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszFormat,
+ ...) __printf(4, 5);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintfDumpCCB
+@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel
+ specified as DBGPRIV_BUFFERED, the debug shall be written to
+ the debug circular buffer instead of being output immediately.
+ (This can be used to capture debug output without incurring
+ the performance cost of printing it at that moment.)
+ This function shall dump the contents of that debug circular
+ buffer to be output in an OS-specific method to a log or
+ console which can be read by developers.
+@Return None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
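+/* Example usage (illustrative): messages logged at the PVR_DBG_BUFFERED
+ * level are stored in the debug circular buffer rather than printed
+ * immediately, and only appear when the buffer is dumped:
+ *
+ * PVR_DPF((PVR_DBG_BUFFERED, "HW event %u", ui32Event));
+ * ...
+ * PVRSRVDebugPrintfDumpCCB();
+ *
+ * ui32Event is a hypothetical variable used only for illustration.
+ */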
+
+#else /* defined(PVRSRV_NEED_PVR_DPF) */
+
+ #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+ #define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+ #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+
+ #define PVR_LOGR_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return (PVRSRV_ERROR_OUT_OF_MEMORY); } MSC_SUPPRESS_4127 } while (0)
+ #define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (0)
+ #define PVR_LOGR_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGRN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGG_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+ #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+ #define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+ #undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+
+#if defined(DEBUG)
+ #define PVR_LOG_WARN(_rc, _call) \
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+ #define PVR_LOG_WARN_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) \
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+#else
+ #define PVR_LOG_WARN(_rc, _call) (void)(_rc)
+ #define PVR_LOG_WARN_IF_ERROR(_rc, _call) (void)(_rc)
+#endif
+
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+ #define PVR_DPF_ENTERED \
+ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered", __func__, __LINE__))
+
+ #define PVR_DPF_ENTERED1(p1) \
+ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+ #define PVR_DPF_RETURN_RC(a) \
+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_RC1(a,p1) \
+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_VAL(a) \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned with value", __func__, __LINE__ )); return (a); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_OK \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0)
+
+ #if !defined(DEBUG)
+ #error PVR DPF Function trace enabled in release build, rectify
+ #endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+ #define PVR_DPF_ENTERED
+ #define PVR_DPF_ENTERED1(p1)
+ #define PVR_DPF_RETURN_RC(a) return (a)
+ #define PVR_DPF_RETURN_RC1(a,p1) return (a)
+ #define PVR_DPF_RETURN_VAL(a) return (a)
+ #define PVR_DPF_RETURN_OK return PVRSRV_OK
+ #define PVR_DPF_RETURN return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
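+/* Example usage of the call-trace macros (illustrative; DoSomething is a
+ * hypothetical function):
+ *
+ * static PVRSRV_ERROR DoSomething(void)
+ * {
+ * PVR_DPF_ENTERED;
+ * ...
+ * PVR_DPF_RETURN_OK;
+ * }
+ */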
+
+#if defined(__KERNEL__) || defined(DOXYGEN)
+/* Use PVR_DPF() unless the message must also appear in release builds */
+#ifdef PVR_DISABLE_LOGGING
+#define PVR_LOG(X)
+#else
+#define PVR_LOG(X) PVRSRVReleasePrintf X;
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVReleasePrintf
+@Description Output an important message, using an OS-specific method,
+ to a log or console which can be read by developers in
+ release builds.
+ Invoked from the macro PVR_LOG().
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+@Return None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2);
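+/* Example usage (illustrative) - note the double brackets, as with PVR_DPF():
+ *
+ * PVR_LOG(("PVR services initialised (device count: %u)", ui32DeviceCount));
+ *
+ * ui32DeviceCount is a hypothetical variable used only for illustration.
+ */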
+#endif
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN)
+
+ #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */
+ /* Empty string implementation that is -O0 build friendly */
+ #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", ""))
+
+/*************************************************************************/ /*!
+@Function PVRTrace
+@Description Output a debug message to the user
+ Invoked from the macro PVR_TRACE().
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+ __printf(1, 2);
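+/* Example usage (illustrative) - as with PVR_DPF(), note the double brackets:
+ *
+ * PVR_TRACE(("Loading firmware image '%s'", pszFirmwareName));
+ *
+ * pszFirmwareName is a hypothetical variable used only for illustration.
+ */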
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+ /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+ #define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+ INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+ {
+ IMG_UINT32 uiTruncated;
+
+ uiTruncated = (IMG_UINT32)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+ INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+ {
+ size_t uiTruncated;
+
+ uiTruncated = (size_t)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+ INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput)
+ {
+ IMG_UINT32 uiTruncated;
+
+ uiTruncated = (IMG_UINT32)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+ #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+ #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr))
+ #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
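+/* Example usage (illustrative): the TRUNCATE_* helpers assert, in builds with
+ * PVRSRV_NEED_PVR_ASSERT defined, that no information is lost by the
+ * narrowing cast; otherwise they reduce to a plain cast:
+ *
+ * ui32Offset = TRUNCATE_64BITS_TO_32BITS(ui64Offset);
+ *
+ * ui32Offset and ui64Offset are hypothetical variables used only for
+ * illustration.
+ */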
+
+/* Macros used to trace calls */
+#if defined(DEBUG)
+ #define PVR_DBG_FILELINE , __FILE__, __LINE__
+ #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+ #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+ #define PVR_DBG_FILELINE_FMT " %s:%u"
+ #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+ PVR_UNREFERENCED_PARAMETER(ui32Line); } while(0)
+#else
+ #define PVR_DBG_FILELINE
+ #define PVR_DBG_FILELINE_PARAM
+ #define PVR_DBG_FILELINE_ARG
+ #define PVR_DBG_FILELINE_FMT
+ #define PVR_DBG_FILELINE_UNREF()
+#endif
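+/* Example usage (illustrative, with hypothetical names): the PVR_DBG_FILELINE
+ * macros let an allocation wrapper record its caller's file and line in DEBUG
+ * builds while compiling to nothing in release builds:
+ *
+ * void *TrackedAlloc(size_t uiSize PVR_DBG_FILELINE_PARAM);
+ * #define TrackedAllocFL(uiSize) TrackedAlloc(uiSize PVR_DBG_FILELINE)
+ */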
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_DEBUG_H__ */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating debugfs directories and entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "allocmem.h"
+
+#define PVR_DEBUGFS_DIR_NAME PVR_DRM_NAME
+
+/* Define to set the PVR_DPF debug output level for pvr_debugfs.
+ * Normally, leave this set to PVR_DBGDRIV_MESSAGE, but when debugging
+ * you can temporarily change this to PVR_DBG_ERROR.
+ */
+#if defined(PVRSRV_NEED_PVR_DPF)
+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBGDRIV_MESSAGE
+#else
+#define PVR_DEBUGFS_PVR_DPF_LEVEL 0
+#endif
+
+static struct dentry *gpsPVRDebugFSEntryDir = NULL;
+
+/* Lock used when adjusting refCounts and deleting entries */
+static struct mutex gDebugFSLock;
+
+/*************************************************************************/ /*!
+ Statistic entry read functions
+*/ /**************************************************************************/
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_
+{
+ OS_STATS_PRINT_FUNC *pfStatsPrint;
+ PVR_DEBUGFS_ENTRY_DATA *pvDebugFsEntry;
+} PVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_
+{
+ void *pvData;
+ OS_STATS_PRINT_FUNC *pfnStatsPrint;
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount;
+ PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount;
+ IMG_UINT32 ui32RefCount;
+ PVR_DEBUGFS_ENTRY_DATA *pvDebugFSEntry;
+} PVR_DEBUGFS_DRIVER_STAT;
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_
+{
+ struct dentry *psDir;
+ PVR_DEBUGFS_DIR_DATA *psParentDir;
+ IMG_UINT32 ui32RefCount;
+} PVR_DEBUGFS_DIR_DATA;
+
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_
+{
+ struct dentry *psEntry;
+ PVR_DEBUGFS_DIR_DATA *psParentDir;
+ IMG_UINT32 ui32RefCount;
+ PVR_DEBUGFS_DRIVER_STAT *psStatData;
+} PVR_DEBUGFS_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_PRIV_DATA_
+{
+ const struct seq_operations *psReadOps;
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite;
+ void *pvData;
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfIncPvDataRefCnt;
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfDecPvDataRefCnt;
+ IMG_BOOL bValid;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+} PVR_DEBUGFS_PRIV_DATA;
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static inline void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry);
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA **ppsDirEntry);
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry);
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
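+/* Directories, entries and stat entries are individually reference counted
+ * (with gDebugFSLock held while counts are adjusted) so that an open file can
+ * keep its backing data alive while the entry is being removed; dropping the
+ * last reference destroys the object.
+ */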
+
+static void _StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFormat, ...)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ seq_printf((struct seq_file *)pvFile, "%s", szBuffer);
+ va_end(ArgList);
+}
+
+static void *_DebugFSStatisticSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+
+ if (psStatData)
+ {
+ /* take reference on psStatData (for duration of stat iteration) */
+ if (!_RefStatEntry(psStatData))
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for '%s' but failed"
+ " to take ref on stat entry, returning -EIO(%d)", __func__,
+ psStatData->pvDebugFSEntry->psEntry->d_iname, -EIO));
+ return NULL;
+ }
+
+ if (*puiPosition == 0)
+ {
+ return psStatData;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+ }
+
+ return NULL;
+}
+
+static void _DebugFSStatisticSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (psStatData)
+ {
+ /* Drop the ref taken on the stat entry; once the count reaches zero the entry is destroyed and must not be read again */
+ if (psStatData->ui32RefCount > 0)
+ {
+ /* drop reference on psStatData (held for duration of stat iteration) */
+ _UnrefAndMaybeDestroyStatEntry((void*)psStatData);
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+ }
+}
+
+static void *_DebugFSStatisticSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (psStatData)
+ {
+ if (psStatData->pvData)
+ {
+ if (puiPosition)
+ {
+ (*puiPosition)++;
+ }
+ else
+ {
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called with puiPosition NULL", __func__));
+ }
+ }
+		else
+		{
+			/* A NULL psStatData->pvData is valid if the stat has no
+			 * structure associated with it (e.g. driver_stats, which
+			 * prints totals stored in a number of global variables) */
+		}
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static int _DebugFSStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)pvData;
+
+ if (psStatData != NULL)
+ {
+ psStatData->pfnStatsPrint((void*)psSeqFile, psStatData->pvData, _StatsSeqPrintf);
+ return 0;
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL, returning -ENODATA(%d)", __func__, -ENODATA));
+ }
+
+ return -ENODATA;
+}
+
+static const struct seq_operations gsDebugFSStatisticReadOps =
+{
+ .start = _DebugFSStatisticSeqStart,
+ .stop = _DebugFSStatisticSeqStop,
+ .next = _DebugFSStatisticSeqNext,
+ .show = _DebugFSStatisticSeqShow,
+};
+
+
+/*************************************************************************/ /*!
+ Common internal API
+*/ /**************************************************************************/
+
+static int _DebugFSFileOpen(struct inode *psINode, struct file *psFile)
+{
+ PVR_DEBUGFS_PRIV_DATA *psPrivData;
+ int iResult = -EIO;
+ IMG_BOOL bRefRet = IMG_FALSE;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+ mutex_lock(&gDebugFSLock);
+
+ PVR_ASSERT(psINode);
+ psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+ if (psPrivData)
+ {
+ /* Check that psPrivData is still valid to use */
+ if (psPrivData->bValid)
+ {
+ psDebugFSEntry = psPrivData->psDebugFSEntry;
+
+ /* Take ref on stat entry before opening seq file - this ref will be dropped if we
+ * fail to open the seq file or when we close it
+ */
+ if (psDebugFSEntry)
+ {
+ bRefRet = _RefDebugFSEntryNoLock(psDebugFSEntry);
+ mutex_unlock(&gDebugFSLock);
+ if (psPrivData->pfIncPvDataRefCnt)
+ {
+ psPrivData->pfIncPvDataRefCnt(psPrivData->pvData);
+ }
+ if (bRefRet)
+ {
+ iResult = seq_open(psFile, psPrivData->psReadOps);
+ if (iResult == 0)
+ {
+ struct seq_file *psSeqFile = psFile->private_data;
+
+ psSeqFile->private = psPrivData->pvData;
+ }
+ else
+ {
+ if (psPrivData->pfDecPvDataRefCnt)
+ {
+ psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+ }
+ /* Drop ref if we failed to open seq file */
+ _UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", __func__, iResult));
+ }
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+
+ return iResult;
+}
+
+static int _DebugFSFileClose(struct inode *psINode, struct file *psFile)
+{
+ int iResult;
+ PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+ if (psPrivData)
+ {
+ psDebugFSEntry = psPrivData->psDebugFSEntry;
+ }
+ iResult = seq_release(psINode, psFile);
+ if (psDebugFSEntry)
+ {
+ _UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+ }
+ if (psPrivData && psPrivData->pfDecPvDataRefCnt)
+ {
+ psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+ }
+ return iResult;
+}
+
+static ssize_t _DebugFSFileWrite(struct file *psFile,
+ const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t *puiPosition)
+{
+ struct inode *psINode = psFile->f_path.dentry->d_inode;
+ PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+ if (psPrivData->pfnWrite == NULL)
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', which does not have pfnWrite defined, returning -EIO(%d)", __func__, psFile->f_path.dentry->d_iname, -EIO));
+ return -EIO;
+ }
+
+ return psPrivData->pfnWrite(pszBuffer, uiCount, *puiPosition, psPrivData->pvData);
+}
+
+static const struct file_operations gsPVRDebugFSFileOps =
+{
+ .owner = THIS_MODULE,
+ .open = _DebugFSFileOpen,
+ .read = seq_read,
+ .write = _DebugFSFileWrite,
+ .llseek = seq_lseek,
+ .release = _DebugFSFileClose,
+};
+
+
+/*************************************************************************/ /*!
+ Public API
+*/ /**************************************************************************/
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSInit
+@Description Initialise PVR debugfs support. This should be called before
+ using any PVRDebugFS functions.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSInit(void)
+{
+ PVR_ASSERT(gpsPVRDebugFSEntryDir == NULL);
+
+ mutex_init(&gDebugFSLock);
+
+ gpsPVRDebugFSEntryDir = debugfs_create_dir(PVR_DEBUGFS_DIR_NAME, NULL);
+ if (gpsPVRDebugFSEntryDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot create '%s' debugfs root directory",
+			__func__, PVR_DEBUGFS_DIR_NAME));
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSDeInit
+@Description    Deinitialise PVR debugfs support. This should be called only
+                if PVRDebugFSInit() has already been called. All debugfs
+                directories and entries should have been removed before this
+                function is called.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSDeInit(void)
+{
+ if (gpsPVRDebugFSEntryDir != NULL)
+ {
+ debugfs_remove(gpsPVRDebugFSEntryDir);
+ gpsPVRDebugFSEntryDir = NULL;
+ mutex_destroy(&gDebugFSLock);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateEntryDir
+@Description    Create a directory for debugfs entries. The directory is
+                created under the root directory set up by PVRDebugFSInit(),
+                or under the given parent directory.
+@Input pszName String containing the name for the directory.
+@Input psParentDir The parent directory in which to create the new
+ directory. This should either be NULL, meaning it
+ should be created in the root directory, or a
+ pointer to a directory as returned by this
+ function.
+@Output ppsNewDir On success, points to the newly created
+ directory.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ PVR_DEBUGFS_DIR_DATA **ppsNewDir)
+{
+ PVR_DEBUGFS_DIR_DATA *psNewDir;
+
+ PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+ if (pszName == NULL || ppsNewDir == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid parameter (pszName and ppsNewDir must not be NULL)", __func__));
+ return -EINVAL;
+ }
+
+ psNewDir = OSAllocMemNoStats(sizeof(*psNewDir));
+
+ if (psNewDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot allocate memory for '%s' pvr_debugfs structure",
+			__func__, pszName));
+ return -ENOMEM;
+ }
+
+ psNewDir->psParentDir = psParentDir;
+ psNewDir->psDir = debugfs_create_dir(pszName, (psNewDir->psParentDir) ? psNewDir->psParentDir->psDir : gpsPVRDebugFSEntryDir);
+
+ if (psNewDir->psDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot create '%s' debugfs directory",
+			__func__, pszName));
+
+ OSFreeMemNoStats(psNewDir);
+ return -ENOMEM;
+ }
+
+ *ppsNewDir = psNewDir;
+ psNewDir->ui32RefCount = 1;
+
+ /* if parent directory is not gpsPVRDebugFSEntryDir, increment its refCount */
+ if (psNewDir->psParentDir)
+ {
+ /* if we fail to acquire the reference that probably means that
+ * parent dir was already freed - we have to cleanup in this situation */
+ if (!_RefDirEntry(psNewDir->psParentDir))
+ {
+ _UnrefAndMaybeDestroyDirEntry(ppsNewDir);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveEntryDir
+@Description Remove a directory that was created by
+ PVRDebugFSCreateEntryDir(). Any directories or files created
+ under the directory being removed should be removed first.
+@Input          ppsDir       Pointer to the directory to be removed. This has
+                             to be a double pointer so that the caller's
+                             reference can be cleared, avoiding races and
+                             use-after-free.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA **ppsDir)
+{
+ _UnrefAndMaybeDestroyDirEntry(ppsDir);
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateEntry
+@Description Create an entry in the specified directory.
+@Input pszName String containing the name for the entry.
+@Input psParentDir Pointer from PVRDebugFSCreateEntryDir()
+ representing the directory in which to create
+ the entry or NULL for the root directory.
+@Input psReadOps Pointer to structure containing the necessary
+ functions to read from the entry.
+@Input pfnWrite Callback function used to write to the entry.
+@Input pvData Private data to be passed to the read
+ functions, in the seq_file private member, and
+ the write function callback.
+@Output ppsNewEntry On success, points to the newly created entry.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ const struct seq_operations *psReadOps,
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+ void *pvData,
+ PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry)
+{
+ PVR_DEBUGFS_PRIV_DATA *psPrivData;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+ struct dentry *psEntry;
+ umode_t uiMode;
+
+ PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+ PVR_ASSERT(!((pfnIncPvDataRefCnt != NULL && pfnDecPvDataRefCnt == NULL) ||
+ (pfnIncPvDataRefCnt == NULL && pfnDecPvDataRefCnt != NULL)));
+
+ psPrivData = OSAllocMemNoStats(sizeof(*psPrivData));
+ if (psPrivData == NULL)
+ {
+ return -ENOMEM;
+ }
+ psDebugFSEntry = OSAllocMemNoStats(sizeof(*psDebugFSEntry));
+ if (psDebugFSEntry == NULL)
+ {
+ OSFreeMemNoStats(psPrivData);
+ return -ENOMEM;
+ }
+
+ psPrivData->psReadOps = psReadOps;
+ psPrivData->pfnWrite = pfnWrite;
+ psPrivData->pvData = (void*)pvData;
+ psPrivData->pfIncPvDataRefCnt = pfnIncPvDataRefCnt;
+ psPrivData->pfDecPvDataRefCnt = pfnDecPvDataRefCnt;
+ psPrivData->bValid = IMG_TRUE;
+ /* Store ptr to debugFSEntry in psPrivData, so a ref can be taken on it
+ * when the client opens a file */
+ psPrivData->psDebugFSEntry = psDebugFSEntry;
+
+ uiMode = S_IFREG;
+
+ if (psReadOps != NULL)
+ {
+ uiMode |= S_IRUGO;
+ }
+
+ if (pfnWrite != NULL)
+ {
+ uiMode |= S_IWUSR;
+ }
+
+ psDebugFSEntry->psParentDir = psParentDir;
+ psDebugFSEntry->ui32RefCount = 1;
+ psDebugFSEntry->psStatData = (PVR_DEBUGFS_DRIVER_STAT*)pvData;
+
+ if (psDebugFSEntry->psParentDir)
+ {
+ /* increment refCount of parent directory */
+ if (!_RefDirEntry(psDebugFSEntry->psParentDir))
+ {
+			/* Free with the matching OSFreeMemNoStats(), these were
+			 * allocated with OSAllocMemNoStats() */
+			OSFreeMemNoStats(psDebugFSEntry);
+			OSFreeMemNoStats(psPrivData);
+ return -EFAULT;
+ }
+ }
+
+ psEntry = debugfs_create_file(pszName,
+ uiMode,
+ (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+ psPrivData,
+ &gsPVRDebugFSFileOps);
+	if (IS_ERR(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' file",
+			 __func__, pszName));
+		/* Undo the parent dir ref and allocations made above, which
+		 * would otherwise leak on this error path */
+		if (psDebugFSEntry->psParentDir)
+		{
+			_UnrefAndMaybeDestroyDirEntry(&psDebugFSEntry->psParentDir);
+		}
+		OSFreeMemNoStats(psDebugFSEntry);
+		OSFreeMemNoStats(psPrivData);
+		return PTR_ERR(psEntry);
+	}
+
+ psDebugFSEntry->psEntry = psEntry;
+ *ppsNewEntry = (void*)psDebugFSEntry;
+
+ return 0;
+}
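+
+/* Illustrative usage sketch (not part of this file): a caller providing its
+ * own seq_operations would normally pair the create and remove calls as
+ * below. gsMyReadOps, MyWriteFn, psMyData and gpsMyEntry are hypothetical
+ * names, not symbols defined by this driver.
+ *
+ *	static PVR_DEBUGFS_ENTRY_DATA *gpsMyEntry;
+ *
+ *	if (PVRDebugFSCreateEntry("my_entry", NULL, &gsMyReadOps, MyWriteFn,
+ *				  NULL, NULL, psMyData, &gpsMyEntry) != 0)
+ *	{
+ *		... handle error ...
+ *	}
+ *	...
+ *	PVRDebugFSRemoveEntry(&gpsMyEntry);
+ */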
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveEntry
+@Description Removes an entry that was created by PVRDebugFSCreateEntry().
+@Input          ppsDebugFSEntry  Pointer to the entry to be removed. This has
+                             to be a double pointer so that the caller's
+                             reference can be cleared, avoiding races and
+                             use-after-free.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+ _UnrefAndMaybeDestroyDebugFSEntry(ppsDebugFSEntry);
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateStatisticEntry
+@Description Create a statistic entry in the specified directory.
+@Input pszName String containing the name for the entry.
+@Input psDir Pointer from PVRDebugFSCreateEntryDir()
+ representing the directory in which to create
+ the entry or NULL for the root directory.
+@Input pfnStatsPrint A callback function used to print all the
+ statistics when reading from the statistic
+ entry.
+@Input          pfnIncStatMemRefCount A callback function used to take a
+                             reference on the memory backing the
+                             statistic.
+@Input          pfnDecStatMemRefCount A callback function used to drop a
+                             reference on the memory backing the
+                             statistic.
+@Input pvData Private data to be passed to the provided
+ callback function.
+
+@Return PVR_DEBUGFS_DRIVER_STAT* On success, a pointer representing
+ the newly created statistic entry.
+ Otherwise, NULL.
+*/ /**************************************************************************/
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint,
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+						PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+ void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData;
+ PVR_DEBUGFS_ENTRY_DATA * psDebugFSEntry;
+
+ int iResult;
+
+ if (pszName == NULL || pfnStatsPrint == NULL)
+ {
+ return NULL;
+ }
+ if ((pfnIncStatMemRefCount != NULL || pfnDecStatMemRefCount != NULL) && pvData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+ if (psStatData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData->pvData = pvData;
+ psStatData->pfnStatsPrint = pfnStatsPrint;
+ psStatData->pfnIncStatMemRefCount = pfnIncStatMemRefCount;
+ psStatData->pfnDecStatMemRefCount = pfnDecStatMemRefCount;
+ psStatData->ui32RefCount = 1;
+
+ iResult = PVRDebugFSCreateEntry(pszName,
+ psDir,
+ &gsDebugFSStatisticReadOps,
+ NULL,
+ (PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *) _RefStatEntry,
+ (PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *) _UnrefAndMaybeDestroyStatEntry,
+ psStatData,
+ &psDebugFSEntry);
+ if (iResult != 0)
+ {
+ OSFreeMemNoStats(psStatData);
+ return NULL;
+ }
+ psStatData->pvDebugFSEntry = (void*)psDebugFSEntry;
+
+ if (pfnIncStatMemRefCount)
+ {
+ /* call function to take reference on the memory holding the stat */
+ psStatData->pfnIncStatMemRefCount((void*)psStatData->pvData);
+ }
+
+ psDebugFSEntry->ui32RefCount = 1;
+
+ return psStatData;
+}
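+
+/* Illustrative usage sketch (not part of this file): a statistic entry only
+ * needs a print callback of type OS_STATS_PRINT_FUNC (declared in osfunc.h);
+ * the callback is passed the seq_file handle, the private stat pointer and a
+ * printf-style helper such as _StatsSeqPrintf() above. MyStatsPrint and
+ * psMyStats are hypothetical names.
+ *
+ *	psMyStatEntry = PVRDebugFSCreateStatisticEntry("my_stats", NULL,
+ *						       MyStatsPrint,
+ *						       NULL, NULL, psMyStats);
+ *	...
+ *	PVRDebugFSRemoveStatisticEntry(psMyStatEntry);
+ */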
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveStatisticEntry
+@Description Removes a statistic entry that was created by
+ PVRDebugFSCreateStatisticEntry().
+@Input psStatEntry Pointer representing the statistic entry to be
+ removed.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ PVR_ASSERT(psStatEntry != NULL);
+	/* drop reference on psStatEntry */
+ _UnrefAndMaybeDestroyStatEntry(psStatEntry);
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *_DebugFSRawStatisticSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+ if (psStatData)
+ {
+ if (*puiPosition == 0)
+ {
+ return psStatData;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static void _DebugFSRawStatisticSeqStop(struct seq_file *psSeqFile,
+ void *pvData)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+ if (!psStatData)
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+}
+
+static void *_DebugFSRawStatisticSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (!psStatData)
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static int _DebugFSRawStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) pvData;
+
+ if (psStatData != NULL)
+ {
+ psStatData->pfStatsPrint((void *) psSeqFile, NULL,
+ _StatsSeqPrintf);
+ return 0;
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+			" NULL, returning -ENODATA(%d)", __func__, -ENODATA));
+ }
+
+ return -ENODATA;
+}
+
+static const struct seq_operations gsDebugFSRawStatisticReadOps =
+{
+ .start = _DebugFSRawStatisticSeqStart,
+ .stop = _DebugFSRawStatisticSeqStop,
+ .next = _DebugFSRawStatisticSeqNext,
+ .show = _DebugFSRawStatisticSeqShow,
+};
+
+PVR_DEBUGFS_RAW_DRIVER_STAT *PVRDebugFSCreateRawStatisticEntry(
+ const IMG_CHAR *pszFileName,
+ void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFsEntry;
+
+ int iResult;
+
+ if (pszFileName == NULL || pfStatsPrint == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+ if (psStatData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData->pfStatsPrint = pfStatsPrint;
+
+ PVR_ASSERT((pvParentDir == NULL));
+
+ iResult = PVRDebugFSCreateEntry(pszFileName,
+ pvParentDir,
+ &gsDebugFSRawStatisticReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psStatData,
+ &psDebugFsEntry);
+ if (iResult != 0)
+ {
+ OSFreeMemNoStats(psStatData);
+ return NULL;
+ }
+ psStatData->pvDebugFsEntry = (void *) psDebugFsEntry;
+
+ psDebugFsEntry->ui32RefCount = 1;
+
+ return psStatData;
+}
+
+void PVRDebugFSRemoveRawStatisticEntry(PVR_DEBUGFS_RAW_DRIVER_STAT *psStatEntry)
+{
+ PVR_ASSERT(psStatEntry != NULL);
+
+ PVRDebugFSRemoveEntry(&psStatEntry->pvDebugFsEntry);
+ OSFreeMemNoStats(psStatEntry);
+}
+#endif
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ PVR_ASSERT(psDirEntry != NULL && psDirEntry->psDir != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ if (psDirEntry->ui32RefCount > 0)
+ {
+ /* Increment refCount */
+ psDirEntry->ui32RefCount++;
+ bStatus = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psDirEntry '%s'"
+			" when ui32RefCount is zero", __func__,
+ psDirEntry->psDir->d_iname));
+ }
+
+ mutex_unlock(&gDebugFSLock);
+
+ return bStatus;
+}
+
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA **ppsDirEntry)
+{
+ PVR_DEBUGFS_DIR_DATA *psDirEntry = *ppsDirEntry;
+
+ PVR_ASSERT(psDirEntry != NULL && psDirEntry->psDir != NULL);
+
+ if (psDirEntry->ui32RefCount > 0)
+ {
+ /* Decrement refCount and free if now zero */
+ if (--psDirEntry->ui32RefCount == 0)
+ {
+			debugfs_remove(psDirEntry->psDir);
+			/* If the parent is not the root gpsPVRDebugFSEntryDir,
+			 * drop the reference held on it */
+			if (psDirEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(&psDirEntry->psParentDir);
+			}
+ OSFreeMemNoStats(psDirEntry);
+ *ppsDirEntry = NULL;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDirEntry '%s'"
+			" when ui32RefCount is zero", __func__,
+ psDirEntry->psDir->d_iname));
+ }
+}
+
+static inline void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry)
+{
+ mutex_lock(&gDebugFSLock);
+ _UnrefAndMaybeDestroyDirEntryWhileLocked(ppsDirEntry);
+ mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+ IMG_BOOL bResult = IMG_FALSE;
+
+ PVR_ASSERT(psDebugFSEntry != NULL);
+
+ bResult = (psDebugFSEntry->ui32RefCount > 0);
+ if (bResult)
+ {
+ /* Increment refCount of psDebugFSEntry */
+ psDebugFSEntry->ui32RefCount++;
+ }
+
+ return bResult;
+}
+
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+
+ mutex_lock(&gDebugFSLock);
+ /* Decrement refCount of psDebugFSEntry, and free if now zero */
+ psDebugFSEntry = *ppsDebugFSEntry;
+ PVR_ASSERT(psDebugFSEntry != NULL);
+
+ if (psDebugFSEntry->ui32RefCount > 0)
+ {
+ if (--psDebugFSEntry->ui32RefCount == 0)
+ {
+ struct dentry *psEntry = psDebugFSEntry->psEntry;
+
+ if (psEntry)
+ {
+ /* Free any private data that was provided to debugfs_create_file() */
+ if (psEntry->d_inode->i_private != NULL)
+ {
+ PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA*)psDebugFSEntry->psEntry->d_inode->i_private;
+
+ psPrivData->bValid = IMG_FALSE;
+ psPrivData->psDebugFSEntry = NULL;
+ OSFreeMemNoStats(psEntry->d_inode->i_private);
+ psEntry->d_inode->i_private = NULL;
+ }
+ debugfs_remove(psEntry);
+ }
+ /* decrement refcount of parent directory */
+ if (psDebugFSEntry->psParentDir)
+ {
+ _UnrefAndMaybeDestroyDirEntryWhileLocked(&psDebugFSEntry->psParentDir);
+ }
+
+ /* now free the memory allocated for psDebugFSEntry */
+ OSFreeMemNoStats(psDebugFSEntry);
+ *ppsDebugFSEntry = NULL;
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSEntry '%s' when ui32RefCount is zero", __func__, psDebugFSEntry->psEntry->d_iname));
+ }
+
+ mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ IMG_BOOL bResult = IMG_FALSE;
+
+ PVR_ASSERT(psStatEntry != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ bResult = (psStatEntry->ui32RefCount > 0);
+ if (bResult)
+ {
+ /* Increment refCount of psStatEntry */
+ psStatEntry->ui32RefCount++;
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psStatEntry '%s' when ui32RefCount is zero", __func__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+ }
+
+ mutex_unlock(&gDebugFSLock);
+
+ return bResult;
+}
+
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ IMG_BOOL bResult;
+
+ PVR_ASSERT(psStatEntry != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ bResult = (psStatEntry->ui32RefCount > 0);
+
+ if (bResult)
+ {
+ /* Decrement refCount of psStatData, and free if now zero */
+ if (--psStatEntry->ui32RefCount == 0)
+ {
+ mutex_unlock(&gDebugFSLock);
+
+ if (psStatEntry->pvDebugFSEntry)
+ {
+ _UnrefAndMaybeDestroyDebugFSEntry((PVR_DEBUGFS_ENTRY_DATA**)&psStatEntry->pvDebugFSEntry);
+ }
+ if (psStatEntry->pfnDecStatMemRefCount)
+ {
+ /* call function to drop reference on the memory holding the stat */
+ psStatEntry->pfnDecStatMemRefCount((void*)psStatEntry->pvData);
+ }
+ OSFreeMemNoStats(psStatEntry);
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psStatEntry '%s' when ui32RefCount is zero", __func__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+ mutex_unlock(&gDebugFSLock);
+ }
+
+ return bResult;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating debugfs directories and entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DEBUGFS_H__)
+#define __PVR_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "osfunc.h"
+
+typedef ssize_t (PVRSRV_ENTRY_WRITE_FUNC)(const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t uiPosition,
+ void *pvData);
+
+
+typedef IMG_UINT32 (PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+typedef IMG_UINT32 (PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+typedef IMG_UINT32 (PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_ PVR_DEBUGFS_DIR_DATA;
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_ PVR_DEBUGFS_ENTRY_DATA;
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_ PVR_DEBUGFS_DRIVER_STAT;
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_ PVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+
+int PVRDebugFSInit(void);
+void PVRDebugFSDeInit(void);
+
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ PVR_DEBUGFS_DIR_DATA **ppsNewDir);
+
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA **ppsDir);
+
+int PVRDebugFSCreateEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ const struct seq_operations *psReadOps,
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+ void *pvData,
+ PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry);
+
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry);
+
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint,
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+						PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+ void *pvData);
+
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+PVR_DEBUGFS_RAW_DRIVER_STAT *PVRDebugFSCreateRawStatisticEntry(
+ const IMG_CHAR *pszFileName,
+ void *pvParentDir,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint);
+
+void PVRDebugFSRemoveRawStatisticEntry(PVR_DEBUGFS_RAW_DRIVER_STAT *psStatEntry);
+#endif
+
+#endif /* !defined(__PVR_DEBUGFS_H__) */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PVR DRM definitions shared between kernel and user space.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#elif defined(SUPPORT_ANDROID_PLATFORM) && \
+ !defined(PVR_ANDROID_OLD_LIBDRM_HEADER_PATH)
+#include <drm.h>
+#else
+#include <libdrm/drm.h>
+#endif
+
+/*
+ * IMPORTANT:
+ * All structures below are designed to be the same size when compiled for 32
+ * and/or 64 bit architectures, i.e. there should be no compiler inserted
+ * padding. This is achieved by sticking to the following rules:
+ * 1) only use fixed width types
+ * 2) always naturally align fields by arranging them appropriately and by using
+ * padding fields when necessary
+ *
+ * These rules should _always_ be followed when modifying or adding new
+ * structures to this file.
+ */
+
+struct drm_pvr_srvkm_cmd {
+ __u32 bridge_id;
+ __u32 bridge_func_id;
+ __u64 in_data_ptr;
+ __u64 out_data_ptr;
+ __u32 in_data_size;
+ __u32 out_data_size;
+};
+
+struct drm_pvr_dbgdrv_cmd {
+ __u32 cmd;
+ __u32 pad;
+ __u64 in_data_ptr;
+ __u64 out_data_ptr;
+ __u32 in_data_size;
+ __u32 out_data_size;
+};
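+
+/* Illustrative sketch (not part of this header): the sizing rule above can
+ * be enforced at build time from kernel code that uses these structures,
+ * for example:
+ *
+ *	BUILD_BUG_ON(sizeof(struct drm_pvr_srvkm_cmd) != 32);
+ *	BUILD_BUG_ON(sizeof(struct drm_pvr_dbgdrv_cmd) != 32);
+ *
+ * Each structure packs four __u32 fields and two __u64 fields with the
+ * 64-bit members naturally aligned, giving 32 bytes on both 32-bit and
+ * 64-bit builds with no compiler-inserted padding.
+ */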
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PVR_SRVKM_CMD 0 /* Used for PVR Services ioctls */
+#define DRM_PVR_DBGDRV_CMD 1 /* Debug driver (PDUMP) ioctls */
+
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_PVR_SRVKM_CMD DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, struct drm_pvr_srvkm_cmd)
+#define DRM_IOCTL_PVR_DBGDRV_CMD DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_DBGDRV_CMD, struct drm_pvr_dbgdrv_cmd)
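+
+/* Illustrative userspace sketch (not part of this header): a Services bridge
+ * call is issued by filling in a drm_pvr_srvkm_cmd and passing it to the DRM
+ * device ioctl. iDrmFd, ui32BridgeId, ui32FuncId and the in/out buffers are
+ * hypothetical values supplied by the caller.
+ *
+ *	struct drm_pvr_srvkm_cmd sCmd = {
+ *		.bridge_id      = ui32BridgeId,
+ *		.bridge_func_id = ui32FuncId,
+ *		.in_data_ptr    = (__u64)(uintptr_t)pvIn,
+ *		.out_data_ptr   = (__u64)(uintptr_t)pvOut,
+ *		.in_data_size   = ui32InSize,
+ *		.out_data_size  = ui32OutSize,
+ *	};
+ *
+ *	if (ioctl(iDrmFd, DRM_IOCTL_PVR_SRVKM_CMD, &sCmd) < 0)
+ *		... handle error ...
+ */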
+
+#endif /* defined(__PVR_DRM_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File pvr_dvfs.h
+@Title System level interface for DVFS
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_DVFS_H_
+#define _PVR_DVFS_H_
+
+#if defined(PVR_DVFS)
+#include <linux/devfreq.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "lock.h"
+
+typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq);
+typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt);
+
+typedef struct _IMG_OPP_
+{
+ IMG_UINT32 ui32Volt;
+ /*
+ * Unit of frequency in Hz.
+ */
+ IMG_UINT32 ui32Freq;
+} IMG_OPP;
+
+typedef struct _IMG_DVFS_DEVICE_CFG_
+{
+ const IMG_OPP *pasOPPTable;
+ IMG_UINT32 ui32OPPTableSize;
+#if defined(PVR_DVFS)
+ IMG_UINT32 ui32PollMs;
+#endif
+ IMG_BOOL bIdleReq;
+ PFN_SYS_DEV_DVFS_SET_FREQUENCY pfnSetFrequency;
+ PFN_SYS_DEV_DVFS_SET_VOLTAGE pfnSetVoltage;
+
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(PVR_DVFS)
+ struct devfreq_cooling_power *psPowerOps;
+#endif
+
+} IMG_DVFS_DEVICE_CFG;
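+
+/* Illustrative sketch (not from the driver sources): a system layer might
+ * describe its operating points and callbacks roughly as below. sDevConfig,
+ * the table values and the SysSetFrequency()/SysSetVoltage() helpers are
+ * hypothetical placeholders for whatever the platform actually provides.
+ *
+ *	static const IMG_OPP gasOPPTable[] = {
+ *		{ .ui32Volt =  900000, .ui32Freq = 200000000 },
+ *		{ .ui32Volt = 1000000, .ui32Freq = 400000000 },
+ *		{ .ui32Volt = 1100000, .ui32Freq = 600000000 },
+ *	};
+ *
+ *	sDevConfig.sDVFS.sDVFSDeviceCfg.pasOPPTable      = gasOPPTable;
+ *	sDevConfig.sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = ARRAY_SIZE(gasOPPTable);
+ *	sDevConfig.sDVFS.sDVFSDeviceCfg.bIdleReq         = IMG_TRUE;
+ *	sDevConfig.sDVFS.sDVFSDeviceCfg.pfnSetFrequency  = SysSetFrequency;
+ *	sDevConfig.sDVFS.sDVFSDeviceCfg.pfnSetVoltage    = SysSetVoltage;
+ */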
+
+#if defined(PVR_DVFS)
+typedef struct _IMG_DVFS_GOVERNOR_
+{
+ IMG_BOOL bEnabled;
+} IMG_DVFS_GOVERNOR;
+
+typedef struct _IMG_DVFS_GOVERNOR_CFG_
+{
+ IMG_UINT32 ui32UpThreshold;
+ IMG_UINT32 ui32DownDifferential;
+} IMG_DVFS_GOVERNOR_CFG;
+#endif
+
+#if defined(__linux__)
+#if defined(PVR_DVFS)
+typedef struct _IMG_DVFS_DEVICE_
+{
+ struct dev_pm_opp *psOPP;
+ struct devfreq *psDevFreq;
+ IMG_BOOL bEnabled;
+ IMG_HANDLE hGpuUtilUserDVFS;
+ struct devfreq_simple_ondemand_data data;
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ struct thermal_cooling_device *psDevfreqCoolingDevice;
+#endif
+} IMG_DVFS_DEVICE;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+typedef struct _PDVFS_DATA_
+{
+ IMG_HANDLE hReactiveTimer;
+ IMG_BOOL bWorkInFrame;
+} PDVFS_DATA;
+#endif
+
+typedef struct _IMG_DVFS_
+{
+#if defined(PVR_DVFS)
+ IMG_DVFS_DEVICE sDVFSDevice;
+ IMG_DVFS_GOVERNOR sDVFSGovernor;
+ IMG_DVFS_GOVERNOR_CFG sDVFSGovernorCfg;
+#endif
+#if defined(SUPPORT_PDVFS)
+ PDVFS_DATA sPDVFSData;
+#endif
+ IMG_DVFS_DEVICE_CFG sDVFSDeviceCfg;
+} PVRSRV_DVFS;
+#endif/* (__linux__) */
+
+#endif /* _PVR_DVFS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PowerVR devfreq device implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux DVFS (devfreq) device implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/devfreq.h>
+#if defined(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#include <rk_init_v2.h>
+#endif
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+#include <linux/pm_opp.h>
+#define OPP_GET_OPP_COUNT dev_pm_opp_get_opp_count
+#define OPP_GET_FREQ dev_pm_opp_get_freq
+#define OPP_GET_VOLTAGE dev_pm_opp_get_voltage
+#define OPP_ADD dev_pm_opp_add
+#define OPP_FIND_FREQ_CEIL dev_pm_opp_find_freq_ceil
+#define OPP_FIND_FREQ_FLOOR dev_pm_opp_find_freq_floor
+#define OPP_STRUCT dev_pm_opp
+#else
+#include <linux/opp.h>
+#define OPP_GET_OPP_COUNT opp_get_opp_count
+#define OPP_GET_FREQ opp_get_freq
+#define OPP_GET_VOLTAGE opp_get_voltage
+#define OPP_ADD opp_add
+#define OPP_FIND_FREQ_CEIL opp_find_freq_ceil
+#define OPP_FIND_FREQ_FLOOR opp_find_freq_floor
+#define OPP_STRUCT opp
+#endif
+
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#include "pvr_dvfs_device.h"
+#include "power.h"
+
+#include <linux/device.h>
+
+#if 0
+#define dev_pm_opp_of_add_table of_init_opp_table
+#define dev_pm_opp_of_remove_table of_free_opp_table
+#endif
+
+
+static PVRSRV_DEVICE_NODE* gpsDeviceNode = NULL;
+
+static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+ IMG_UINT32 ui32Freq, ui32CurFreq, ui32Volt, ui32CurVolt;
+ struct OPP_STRUCT *opp;
+
+ if (!psDVFSDevice->bEnabled)
+ {
+ return 0;
+ }
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, requested_freq, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ PVR_DPF((PVR_DBG_ERROR, "Invalid OPP"));
+ return PTR_ERR(opp);
+ }
+
+ ui32Freq = OPP_GET_FREQ(opp);
+ ui32Volt = OPP_GET_VOLTAGE(opp);
+ rcu_read_unlock();
+
+ ui32CurFreq = psRGXTimingInfo->ui32CoreClockSpeed;
+ ui32CurVolt = psRGXTimingInfo->ui32CoreVoltage;
+
+ if (ui32CurFreq == ui32Freq)
+ {
+		if (ui32CurVolt == ui32Volt)
+ return 0;
+ psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ psRGXTimingInfo->ui32CoreVoltage = ui32Volt;
+ return 0;
+ }
+
+ if (PVRSRV_OK != PVRSRVDevicePreClockSpeedChange(gpsDeviceNode,
+ psDVFSDeviceCfg->bIdleReq,
+ NULL))
+ {
+ dev_err(dev, "PVRSRVDevicePreClockSpeedChange failed\n");
+ return -EPERM;
+ }
+
+ /* Increasing frequency, change voltage first */
+ if (ui32Freq > ui32CurFreq)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ }
+
+ psDVFSDeviceCfg->pfnSetFrequency(ui32Freq);
+
+ /* Decreasing frequency, change frequency first */
+ if (ui32Freq < ui32CurFreq)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ }
+
+ psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq;
+ psRGXTimingInfo->ui32CoreVoltage = ui32Volt;
+
+ PVRSRVDevicePostClockSpeedChange(gpsDeviceNode, psDVFSDeviceCfg->bIdleReq,
+ NULL);
+
+ return 0;
+}
+
+static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = gpsDeviceNode->pvDevice;
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ stat->current_frequency = psRGXTimingInfo->ui32CoreClockSpeed;
+
+ if (psDevInfo->pfnGetGpuUtilStats == NULL)
+ {
+ /* Not yet ready. So set times to something sensible. */
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ return 0;
+ }
+
+ eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode,
+ psDVFSDevice->hGpuUtilUserDVFS,
+ &sGpuUtilStats);
+
+ if (eError != PVRSRV_OK)
+ {
+ return -EAGAIN;
+ }
+
+ stat->busy_time = sGpuUtilStats.ui64GpuStatActiveHigh + sGpuUtilStats.ui64GpuStatActiveLow;
+ stat->total_time = sGpuUtilStats.ui64GpuStatCumulative;
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+
+ *freq = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ return 0;
+}
+#endif
+
+static struct devfreq_dev_profile img_devfreq_dev_profile = {
+ .target = devfreq_target,
+ .get_dev_status = devfreq_get_dev_status,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ .get_cur_freq = devfreq_cur_freq,
+#endif
+};
+
+static int FillOPPTable(struct device *dev)
+{
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg =
+ &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ const IMG_OPP *iopp;
+ int i, err = 0;
+
+ for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable;
+ i < psDVFSDeviceCfg->ui32OPPTableSize;
+ i++, iopp++)
+ {
+ err = OPP_ADD(dev, iopp->ui32Freq, iopp->ui32Volt);
+ if (err) {
+ dev_err(dev, "Could not add OPP entry, %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int GetOPPValues(struct device *dev,
+ unsigned long *min_freq,
+ unsigned long *min_volt,
+ unsigned long *max_freq)
+{
+ struct OPP_STRUCT *opp;
+ int count, i, err = 0;
+ unsigned long freq;
+
+ /* ChromiumOS kernels are carrying a fix which changes the type of
+ * freq_table in struct devfreq_dev_profile to 'unsigned long'.
+ * However, this change has not been merged upstream, so we need
+ * to support using the older 'unsigned int' type too.
+ */
+#if defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
+ unsigned long *freq_table;
+#else
+ unsigned int *freq_table;
+#endif
+
+ /* Start RCU read-side critical section to access device opp_list. */
+ rcu_read_lock();
+ count = OPP_GET_OPP_COUNT(dev);
+ if (count < 0) {
+ dev_err(dev, "Could not fetch OPP count, %d\n", count);
+ rcu_read_unlock();
+ return count;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC);
+#else
+ freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC);
+#endif
+
+ if (!freq_table) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+
+ /*
+ * Iterate over OPP table.
+ * Iteration 0 finds "opp w/ freq >= 0 Hz".
+ */
+ freq = 0;
+ opp = OPP_FIND_FREQ_CEIL(dev, &freq);
+ if (IS_ERR(opp)) {
+ err = PTR_ERR(opp);
+ dev_err(dev, "Couldn't find lowest frequency, %d\n", err);
+ goto exit;
+ }
+
+ freq_table[0] = freq;
+ *min_freq = freq;
+ *min_volt = OPP_GET_VOLTAGE(opp);
+ dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count,
+ freq, *min_volt);
+
+ /*
+ * Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)".
+ */
+ for (i = 1; i < count; i++) {
+ freq++;
+ opp = OPP_FIND_FREQ_CEIL(dev, &freq);
+ if (IS_ERR(opp)) {
+ err = PTR_ERR(opp);
+ dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err);
+ goto exit;
+ }
+
+ freq_table[i] = freq;
+ *max_freq = freq;
+ dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", i + 1, count,
+ freq, OPP_GET_VOLTAGE(opp));
+ }
+
+exit:
+
+ rcu_read_unlock();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (!err)
+ {
+ img_devfreq_dev_profile.freq_table = freq_table;
+ img_devfreq_dev_profile.max_state = count;
+ }
+ else
+#endif
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ devm_kfree(dev, freq_table);
+#else
+ kfree(freq_table);
+#endif
+ }
+
+ return err;
+}
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+static int RegisterCoolingDevice(struct device *dev,
+ IMG_DVFS_DEVICE *psDVFSDevice,
+ struct devfreq_cooling_power *powerOps)
+{
+ struct device_node *of_node;
+ int err = 0;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Not supported in GuestOS drivers */
+ PVR_UNREFERENCED_PARAMETER(dev);
+ PVR_UNREFERENCED_PARAMETER(psDVFSDevice);
+ PVR_UNREFERENCED_PARAMETER(powerOps);
+ PVR_UNREFERENCED_PARAMETER(of_node);
+#else
+ if (!powerOps)
+ {
+ dev_info(dev, "Cooling: power ops not registered, not enabling cooling");
+ return 0;
+ }
+
+	err = rk_power_model_simple_init(dev);
+	if (err && err != -ENODEV && err != -EPROBE_DEFER) {
+		dev_err(dev,
+			"Failed to initialize simple power model (%d)\n",
+			err);
+		/* Treat this as non-fatal and continue without the power model */
+		err = 0;
+	}
+
+ of_node = of_node_get(dev->of_node);
+
+ psDVFSDevice->psDevfreqCoolingDevice = of_devfreq_cooling_register_power(
+ of_node, psDVFSDevice->psDevFreq, powerOps);
+
+ if (IS_ERR(psDVFSDevice->psDevfreqCoolingDevice))
+ {
+ err = PTR_ERR(psDVFSDevice->psDevfreqCoolingDevice);
+ dev_err(dev, "Failed to register as devfreq cooling device %d", err);
+ }
+
+ of_node_put(of_node);
+#endif
+
+ return err;
+}
+#endif
+
+#define TO_IMG_ERR(err) ((err == -EPROBE_DEFER) ? PVRSRV_ERROR_PROBE_DEFER : PVRSRV_ERROR_INIT_FAILURE)
+
+PVRSRV_ERROR InitDVFS(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL;
+ IMG_DVFS_GOVERNOR_CFG *psDVFSGovernorCfg = NULL;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL;
+ struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice;
+ unsigned long min_freq = 0, max_freq = 0, min_volt = 0;
+ PVRSRV_ERROR eError;
+	int err = 0;	/* remains 0 if no device tree OPP path is compiled in below */
+
+ if (gpsDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "DVFS already initialised for device node %p",
+ gpsDeviceNode));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ gpsDeviceNode = psDeviceNode;
+ psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg;
+ psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo;
+
+ eError = RGXRegisterGpuUtilStats(&psDVFSDevice->hGpuUtilUserDVFS);
+ if (eError != PVRSRV_OK) {
+		PVR_DPF((PVR_DBG_ERROR, "Failed to register for GPU utilisation stats, %d", eError));
+ return eError;
+ }
+
+
+#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
+ /* Register the OPPs if they are available in device tree */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \
+ || defined(LSK_OPPV2_BACKPORT)
+ err = dev_pm_opp_of_add_table(psDev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ err = of_init_opp_table(psDev);
+#else
+ err = 0;
+#endif /* LINUX_VERSION_CODE */
+#endif
+ if (err) {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+
+ if (psDVFSDeviceCfg->pasOPPTable) {
+ err = FillOPPTable(psDev);
+ if (err) {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+ }
+
+ err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq);
+ if (err) {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+
+ img_devfreq_dev_profile.initial_freq = min_freq;
+ img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs;
+
+ psRGXTimingInfo->ui32CoreClockSpeed = min_freq;
+
+ psDVFSDeviceCfg->pfnSetFrequency(min_freq);
+ psDVFSDeviceCfg->pfnSetVoltage(min_volt);
+
+ psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold;
+ psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev,
+ &img_devfreq_dev_profile, "simple_ondemand",
+ &psDVFSDevice->data);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+ psDVFSDevice->psDevFreq = devfreq_add_device(psDev,
+ &img_devfreq_dev_profile, "simple_ondemand",
+ &psDVFSDevice->data);
+#else
+ psDVFSDevice->psDevFreq = devfreq_add_device(psDev,
+ &img_devfreq_dev_profile, &devfreq_simple_ondemand,
+ &psDVFSDevice->data);
+#endif
+
+ if (IS_ERR(psDVFSDevice->psDevFreq))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to add as devfreq device %p, %ld",
+ psDVFSDevice->psDevFreq, PTR_ERR(psDVFSDevice->psDevFreq)));
+ eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq));
+ goto err_exit;
+ }
+
+	eError = SuspendDVFS();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__));
+		goto err_exit;
+	}
+
+ psDVFSDevice->psDevFreq->min_freq = min_freq;
+ psDVFSDevice->psDevFreq->max_freq = max_freq;
+
+ err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+ if (err) {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to register opp notifier, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ err = RegisterCoolingDevice(psDev, psDVFSDevice,
+ psDVFSDeviceCfg->psPowerOps);
+ if (err) {
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+#endif
+
+ PVR_TRACE(("PVR DVFS activated: %lu-%lu Hz, Period: %ums", min_freq,
+ max_freq, psDVFSDeviceCfg->ui32PollMs));
+
+ return PVRSRV_OK;
+
+err_exit:
+ DeinitDVFS(psDeviceNode);
+ return eError;
+}
+
+void DeinitDVFS(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice;
+ IMG_INT32 i32Error;
+
+ PVR_ASSERT(psDeviceNode == gpsDeviceNode);
+
+ if (!psDVFSDevice)
+ return;
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ if (!IS_ERR_OR_NULL(psDVFSDevice->psDevfreqCoolingDevice))
+ {
+ devfreq_cooling_unregister(psDVFSDevice->psDevfreqCoolingDevice);
+ psDVFSDevice->psDevfreqCoolingDevice = NULL;
+ }
+#endif
+
+ if (psDVFSDevice->psDevFreq)
+ {
+ i32Error = devfreq_unregister_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+ if (i32Error < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier"));
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+ devfreq_remove_device(psDVFSDevice->psDevFreq);
+#endif
+
+ psDVFSDevice->psDevFreq = NULL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ kfree(img_devfreq_dev_profile.freq_table);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ dev_pm_opp_of_remove_table(psDev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ of_free_opp_table(psDev);
+#endif
+
+ RGXUnregisterGpuUtilStats(psDVFSDevice->hGpuUtilUserDVFS);
+ psDVFSDevice->hGpuUtilUserDVFS = NULL;
+
+ gpsDeviceNode = NULL;
+}
+
+PVRSRV_ERROR SuspendDVFS(void)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+
+ psDVFSDevice->bEnabled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR ResumeDVFS(void)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Not supported in GuestOS drivers */
+ psDVFSDevice->bEnabled = IMG_FALSE;
+#else
+ psDVFSDevice->bEnabled = IMG_TRUE;
+#endif
+
+ return PVRSRV_OK;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File           pvr_dvfs_device.h
+@Title          System level interface for DVFS
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines the API between Services and the system
+                layer required for DVFS integration.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_DVFS_DEVICE_H_
+#define _PVR_DVFS_DEVICE_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+
+PVRSRV_ERROR InitDVFS(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void DeinitDVFS(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR SuspendDVFS(void);
+
+PVRSRV_ERROR ResumeDVFS(void);
+
+#endif /* _PVR_DVFS_DEVICE_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File pvr_fd_sync_kernel.h
+@Title Kernel/userspace interface definitions to use the kernel sync
+ driver
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+
+#ifndef _PVR_FD_SYNC_KERNEL_H_
+#define _PVR_FD_SYNC_KERNEL_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
+
+#define PVR_SYNC_IOC_MAGIC 'W'
+
+#define PVR_SYNC_IOC_RENAME \
+ _IOW(PVR_SYNC_IOC_MAGIC, 4, struct pvr_sync_rename_ioctl_data)
+
+#define PVR_SYNC_IOC_FORCE_SW_ONLY \
+ _IO(PVR_SYNC_IOC_MAGIC, 5)
+
+#define PVRSYNC_MODNAME "pvr_sync"
+
+struct pvr_sync_pt_info {
+ /* Output */
+ __u32 id;
+ __u32 ui32FWAddr;
+ __u32 ui32CurrOp;
+ __u32 ui32NextOp;
+ __u32 ui32TlTaken;
+} __attribute__((packed, aligned(8)));
+
+struct pvr_sync_rename_ioctl_data
+{
+ /* Input */
+ char szName[32];
+} __attribute__((packed, aligned(8)));
+
+#endif /* _PVR_FD_SYNC_KERNEL_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File pvr_gputrace.c
+@Title PVR GPU Trace module Linux implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_error.h"
+#include "srvkm.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+
+#include "pvr_gputrace.h"
+#include "rgxhwperf.h"
+
+#include "trace_events.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+#include "rogue_trace_events.h"
+
+
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+/* DebugFS entry for the feature's on/off file */
+static PVR_DEBUGFS_ENTRY_DATA *gpsPVRDebugFSGpuTracingOnEntry = NULL;
+
+/* This variable is set when the gpu tracing is enabled but the HWPerf
+ * resources have not been initialised yet. This is most likely to happen
+ * if the driver was built with SUPPORT_KERNEL_SRVINIT=1.
+ * If this variable is IMG_TRUE it means that the gpu tracing was enabled
+ * but the full initialisation is yet to be done. */
+static IMG_BOOL gbFTraceGPUEventsPreEnabled = IMG_FALSE;
+/* When this variable is IMG_TRUE it means that the gpu tracing has been fully
+ * initialised and enabled. */
+static IMG_BOOL gbFTraceGPUEventsEnabled = IMG_FALSE;
+
+/*
+ If SUPPORT_GPUTRACE_EVENTS is defined, the driver is built with support
+ to route RGX HWPerf packets to the Linux FTrace mechanism. To allow
+ this routing feature to be switched on and off at run-time the following
+ debugfs entry is created:
+ /sys/kernel/debug/pvr/gpu_tracing_on
+ To enable GPU events in the FTrace log, type the following on the target:
+ echo Y > /sys/kernel/debug/pvr/gpu_tracing_on
+ To disable, type:
+ echo N > /sys/kernel/debug/pvr/gpu_tracing_on
+
+ It is also possible to enable this feature at driver load by setting the
+ default application hint "EnableFTraceGPU=1" in /etc/powervr.ini.
+*/
+
+static void *GpuTracingSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ if (*puiPosition == 0)
+ {
+ /* We want only one entry in the sequence, one call to show() */
+ return (void*)1;
+ }
+
+ return NULL;
+}
+
+
+static void GpuTracingSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+}
+
+
+static void *GpuTracingSeqNext(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ return NULL;
+}
+
+
+static int GpuTracingSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ const IMG_CHAR *pszInit = "N\n";
+
+ if (gbFTraceGPUEventsEnabled)
+ {
+ /* Fully operational */
+ pszInit = "Y\n";
+ }
+ else if (gbFTraceGPUEventsPreEnabled)
+ {
+ /* Partially initialised (probably HWPerf not initialised yet) */
+ pszInit = "P\n";
+ }
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ seq_puts(psSeqFile, pszInit);
+ return 0;
+}
+
+
+static struct seq_operations gsGpuTracingReadOps =
+{
+ .start = GpuTracingSeqStart,
+ .stop = GpuTracingSeqStop,
+ .next = GpuTracingSeqNext,
+ .show = GpuTracingSeqShow,
+};
+
+
+static IMG_INT GpuTracingSet(const IMG_CHAR *buffer, size_t count, loff_t uiPosition, void *data)
+{
+ IMG_CHAR cFirstChar;
+
+ PVR_UNREFERENCED_PARAMETER(uiPosition);
+ PVR_UNREFERENCED_PARAMETER(data);
+
+ if (!count)
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(&cFirstChar, buffer, 1))
+ {
+ return -EFAULT;
+ }
+
+ switch (cFirstChar)
+ {
+ case '0':
+ case 'n':
+ case 'N':
+ {
+ PVRGpuTraceEnabledSet(IMG_FALSE);
+ PVR_TRACE(("DISABLED GPU FTrace"));
+ break;
+ }
+ case '1':
+ case 'y':
+ case 'Y':
+ {
+ if (PVRGpuTraceEnabledSet(IMG_TRUE) == PVRSRV_OK)
+ {
+ PVR_TRACE(("ENABLED GPU FTrace"));
+ }
+ else
+ {
+ PVR_TRACE(("FAILED to enable GPU FTrace"));
+ }
+ break;
+ }
+ }
+
+ return count;
+}
+
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+
+void PVRGpuTraceClientWork(
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszKickType)
+{
+ PVR_ASSERT(pszKickType);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRGpuTraceClientWork(%s): contextId %u, "
+ "jobId %u", pszKickType, ui32CtxId, ui32JobId));
+
+ if (PVRGpuTraceEnabled())
+ {
+ trace_gpu_job_enqueue(ui32CtxId, ui32JobId, pszKickType);
+ }
+}
+
+
+void PVRGpuTraceWorkSwitch(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32CtxPriority,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ PVR_ASSERT(pszWorkType);
+
+ /* Invert the priority because this is what systrace expects. Lower values
+ * convey a higher priority to systrace. */
+ trace_gpu_sched_switch(pszWorkType, ui64HWTimestampInOSTime,
+ eSwType == PVR_GPUTRACE_SWITCH_TYPE_END ? 0 : ui32CtxId,
+ 2-ui32CtxPriority, ui32JobId);
+}
+
+void PVRGpuTraceUfo(
+ IMG_UINT64 ui64OSTimestamp,
+ const RGX_HWPERF_UFO_EV eEvType,
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ switch (eEvType) {
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+ break;
+ default:
+ break;
+ }
+}
+
+void PVRGpuTraceFirmware(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+}
+
+void PVRGpuTraceEventsLost(
+ const RGX_HWPERF_STREAM_ID eStreamId,
+ const IMG_UINT32 ui32LastOrdinal,
+ const IMG_UINT32 ui32CurrOrdinal)
+{
+ trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+}
+
+PVRSRV_ERROR PVRGpuTraceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = RGXHWPerfFTraceGPUInit(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ return eError;
+
+ eError = PVRDebugFSCreateEntry("gpu_tracing_on", NULL, &gsGpuTracingReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)GpuTracingSet,
+ NULL, NULL, NULL,
+ &gpsPVRDebugFSGpuTracingOnEntry);
+ if (eError != PVRSRV_OK)
+ {
+ RGXHWPerfFTraceGPUDeInit(psDeviceNode);
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+void PVRGpuTraceDeInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* gbFTraceGPUEventsEnabled and gbFTraceGPUEventsPreEnabled are cleared
+ * in this function. */
+ PVRGpuTraceEnabledSet(IMG_FALSE);
+
+ RGXHWPerfFTraceGPUDeInit(psDeviceNode);
+
+ /* Can be NULL if driver startup failed */
+ if (gpsPVRDebugFSGpuTracingOnEntry)
+ {
+ PVRDebugFSRemoveEntry(&gpsPVRDebugFSGpuTracingOnEntry);
+ }
+}
+
+IMG_BOOL PVRGpuTraceEnabled(void)
+{
+ return gbFTraceGPUEventsEnabled;
+}
+
+void PVRGpuTraceSetEnabled(IMG_BOOL bEnabled)
+{
+ gbFTraceGPUEventsEnabled = bEnabled;
+}
+
+IMG_BOOL PVRGpuTracePreEnabled(void)
+{
+ return gbFTraceGPUEventsPreEnabled;
+}
+
+void PVRGpuTraceSetPreEnabled(IMG_BOOL bEnabled)
+{
+ gbFTraceGPUEventsPreEnabled = bEnabled;
+}
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File pvr_gputrace.h
+@Title PVR GPU Trace module common environment interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+#include "rgx_hwperf_km.h"
+
+
+/******************************************************************************
+ Module out-bound API
+******************************************************************************/
+
+/*
+ The device layer of the KM driver defines these two APIs to allow a
+ platform module to set and retrieve the feature's on/off state.
+*/
+extern PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue);
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+typedef enum {
+ PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+ PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+ PVR_GPUTRACE_SWITCH_TYPE_END = 2
+
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+void PVRGpuTraceClientWork(
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32IntJobRef,
+ const IMG_CHAR* pszKickType);
+
+
+void PVRGpuTraceWorkSwitch(
+ IMG_UINT64 ui64OSTimestamp,
+ const IMG_UINT32 ui32ContextId,
+ const IMG_UINT32 ui32CtxPriority,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceUfo(
+ IMG_UINT64 ui64OSTimestamp,
+ const RGX_HWPERF_UFO_EV eEvType,
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void PVRGpuTraceFirmware(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceEventsLost(
+ const RGX_HWPERF_STREAM_ID eStreamId,
+ const IMG_UINT32 ui32LastOrdinal,
+ const IMG_UINT32 ui32CurrOrdinal);
+
+/* Early initialisation of GPU Ftrace events logic.
+ * This function creates debugfs entry and initialises some necessary
+ * structures. */
+PVRSRV_ERROR PVRGpuTraceInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void PVRGpuTraceDeInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_BOOL PVRGpuTraceEnabled(void);
+void PVRGpuTraceSetEnabled(IMG_BOOL bEnabled);
+IMG_BOOL PVRGpuTracePreEnabled(void);
+void PVRGpuTraceSetPreEnabled(IMG_BOOL bEnabled);
+
+/* FTrace events callbacks */
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+#endif /* PVR_GPUTRACE_H_ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the HWPerf OS specific initialisations.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVR_HWPERF_H__
+#define __PVR_HWPERF_H__
+
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+
+#if defined(LINUX)
+PVRSRV_ERROR PVRSRVHWperfCreateDebugFs(void);
+void PVRSRVHWperfDestroyDebugFs(void);
+#endif
+
+#endif /* __PVR_HWPERF_H__*/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Intrinsics definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_INTRINSICS_H_
+#define _PVR_INTRINSICS_H_
+
+/* PVR_CTZLL:
+ * Count the number of trailing zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__)
+
+ #define PVR_CTZLL __builtin_ctzll
+#endif
+#endif
+
+/* PVR_CLZLL:
+ * Count the number of leading zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+ defined(__arm__) || defined(__mips)
+
+#define PVR_CLZLL __builtin_clzll
+
+#endif
+#endif
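+
+/*
+ * Illustrative values (not part of the driver): when available, the macros
+ * map directly onto the GCC builtins, so for a 64-bit long long the
+ * following hold:
+ *
+ *   PVR_CTZLL(0x8ULL)     == 3    three trailing zero bits
+ *   PVR_CTZLL(1ULL << 40) == 40
+ *   PVR_CLZLL(0x1ULL)     == 63   sixty-three leading zero bits
+ */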
+
+#endif /* _PVR_INTRINSICS_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PowerVR notifier interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "device.h"
+#include "dllist.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrversion.h"
+
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle;
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify;
+ DLLIST_NODE sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+/* Head of the list of callbacks called when command complete happens */
+static DLLIST_NODE g_sCmdCompNotifyHead;
+static POSWR_LOCK g_hCmdCompNotifyLock;
+
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ dllist_init(&g_sCmdCompNotifyHead);
+
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVCmdCompleteDeinit(void)
+{
+ /* Check that all notify functions have been unregistered */
+ if (!dllist_is_empty(&g_sCmdCompNotifyHead))
+ {
+ PDLLIST_NODE psNode;
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Command complete notify list is not empty!", __func__));
+
+ /* Clean up any stragglers */
+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+ while (psNode)
+ {
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ dllist_remove_node(psNode);
+
+ psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+ OSFreeMem(psNotify);
+
+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+ }
+ }
+
+ if (g_hCmdCompNotifyLock)
+ {
+ OSWRLockDestroy(g_hCmdCompNotifyLock);
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ if (!phNotify || !pfnCmdCompleteNotify || !hCmdCompHandle)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+ __func__, phNotify, pfnCmdCompleteNotify, hCmdCompHandle));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psNotify = OSAllocMem(sizeof(*psNotify));
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not enough memory to allocate CmdCompleteNotify function",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Set-up the notify data */
+ psNotify->hCmdCompHandle = hCmdCompHandle;
+ psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+ /* Add it to the list of Notify functions */
+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+ dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+ *phNotify = psNotify;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+ dllist_remove_node(&psNotify->sListNode);
+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+ OSFreeMem(psNotify);
+
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#if !defined(NO_HARDWARE)
+ DLLIST_NODE *psNode, *psNext;
+#endif
+
+ /* Call notify callbacks to check if blocked work items can now proceed */
+#if !defined(NO_HARDWARE)
+ OSWRLockAcquireRead(g_hCmdCompNotifyLock);
+ dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
+ {
+ PVRSRV_CMDCOMP_NOTIFY *psNotify =
+ IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+ if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+ {
+ psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+ }
+ }
+ OSWRLockReleaseRead(g_hCmdCompNotifyLock);
+#endif
+
+ if (psPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+ }
+}
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+ IMG_UINT32 ui32RequesterID;
+ DLLIST_NODE sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+ POSWR_LOCK hLock;
+ IMG_UINT32 ui32RequestCount;
+ DEBUG_REQUEST_ENTRY asEntry[1];
+} DEBUG_REQUEST_TABLE;
+
+typedef struct DEBUG_REQUEST_NOTIFY_TAG
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle;
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify;
+ IMG_UINT32 ui32RequesterID;
+ DLLIST_NODE sListNode;
+} DEBUG_REQUEST_NOTIFY;
+
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 *paui32Table,
+ IMG_UINT32 ui32Length)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if (psDevNode->hDebugTable)
+ {
+ return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+ }
+
+ psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
+ (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1)));
+ if (!psDebugTable)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = OSWRLockCreate(&psDebugTable->hLock);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeDebugTable;
+ }
+
+ psDebugTable->ui32RequestCount = ui32Length;
+
+ /* Init the list heads */
+ for (i = 0; i < ui32Length; i++)
+ {
+ psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i];
+ dllist_init(&psDebugTable->asEntry[i].sListHead);
+ }
+
+ psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable;
+
+ return PVRSRV_OK;
+
+ErrorFreeDebugTable:
+ OSFreeMem(psDebugTable);
+ psDebugTable = NULL;
+
+ return eError;
+}
+
+void
+PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ IMG_UINT32 i;
+
+ PVR_ASSERT(psDevNode->hDebugTable);
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+ psDevNode->hDebugTable = NULL;
+
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
+ __func__, i));
+ }
+ }
+
+ OSWRLockDestroy(psDebugTable->hLock);
+ psDebugTable->hLock = NULL;
+
+ OSFreeMem(psDebugTable);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ DEBUG_REQUEST_NOTIFY *psNotify;
+ PDLLIST_NODE psHead = NULL;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if (!phNotify || !psDevNode || !pfnDbgRequestNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+ __func__, phNotify, psDevNode, pfnDbgRequestNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+
+ PVR_ASSERT(psDebugTable);
+
+ psNotify = OSAllocMem(sizeof(*psNotify));
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not enough memory to allocate DbgRequestNotify structure",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Set-up the notify data */
+ psNotify->psDevNode = psDevNode;
+ psNotify->hDbgRequestHandle = hDbgRequestHandle;
+ psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+ psNotify->ui32RequesterID = ui32RequesterID;
+
+ /* Lock down all the lists */
+ OSWRLockAcquireWrite(psDebugTable->hLock);
+
+ /* Find which list to add it to */
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+ {
+ psHead = &psDebugTable->asEntry[i].sListHead;
+ }
+ }
+
+ if (!psHead)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to find debug requester", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrorReleaseLock;
+ }
+
+ /* Add it to the list of Notify functions */
+ dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+ /* Unlock the lists */
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+
+ *phNotify = psNotify;
+
+ return PVRSRV_OK;
+
+ErrorReleaseLock:
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+ OSFreeMem(psNotify);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+ DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify;
+ DEBUG_REQUEST_TABLE *psDebugTable;
+
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable;
+
+ OSWRLockAcquireWrite(psDebugTable->hLock);
+ dllist_remove_node(&psNotify->sListNode);
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+
+ OSFreeMem(psNotify);
+
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ DEBUG_REQUEST_TABLE *psDebugTable =
+ (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+ static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" };
+ const IMG_CHAR *szVerbosityLevel;
+ IMG_UINT32 i;
+ IMG_UINT32 j;
+
+ static_assert(IMG_ARR_NUM_ELEMS(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1,
+ "Incorrect number of verbosity levels");
+
+ PVR_ASSERT(psDebugTable);
+
+ if (!pfnDumpDebugPrintf)
+ {
+ /*
+ * Only dump the call stack to the kernel log if the debug text is going
+ * there.
+ */
+ OSDumpStack();
+ }
+
+ OSWRLockAcquireRead(psDebugTable->hLock);
+
+ if (ui32VerbLevel < IMG_ARR_NUM_ELEMS(apszVerbosityTable))
+ {
+ szVerbosityLevel = apszVerbosityTable[ui32VerbLevel];
+ }
+ else
+ {
+ szVerbosityLevel = "unknown";
+ PVR_ASSERT(!"Invalid verbosity level received");
+ }
+
+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------",
+ szVerbosityLevel);
+
+ PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s",
+ PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
+ PVR_DUMPDEBUG_LOG("Time now: %015llu", OSClockus64());
+
+ switch (psPVRSRVData->eServicesState)
+ {
+ case PVRSRV_SERVICES_STATE_OK:
+ PVR_DUMPDEBUG_LOG("Services State: OK");
+ break;
+ case PVRSRV_SERVICES_STATE_BAD:
+ PVR_DUMPDEBUG_LOG("Services State: BAD");
+ break;
+ default:
+ PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)",
+ psPVRSRVData->eServicesState);
+ break;
+ }
+
+ /* For each verbosity level */
+ for (j = 0; j <= ui32VerbLevel; j++)
+ {
+ /* For each requester */
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ DLLIST_NODE *psNode;
+ DLLIST_NODE *psNext;
+
+ dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext)
+ {
+ DEBUG_REQUEST_NOTIFY *psNotify =
+ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, j,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+ }
+
+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------");
+ OSWRLockReleaseRead(psDebugTable->hLock);
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PowerVR notifier interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVR_NOTIFIER_H__)
+#define __PVR_NOTIFIER_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+
+/**************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /***************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/**************************************************************************/ /*!
+@Function PVRSRVCmdCompleteInit
+@Description Performs initialisation of the command complete notifier
+ interface.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVCmdCompleteDeinit
+@Description Performs cleanup for the command complete notifier interface.
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterCmdCompleteNotify
+@Description Register a callback function that is called when some device
+ finishes some work, which is signalled via a call to
+ PVRSRVCheckStatus.
+@Output phNotify On success, points to command complete
+ notifier handle
+@Input pfnCmdCompleteNotify Function callback
+@Input hPrivData Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+ PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterCmdCompleteNotify
+@Description Unregister a previously registered callback function.
+@Input hNotify Command complete notifier handle
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function PVRSRVCheckStatus
+@Description Notify any registered command complete handlers that some work
+ has been finished (unless hCmdCompCallerHandle matches a
+ handler's hPrivData). Also signal the global event object.
+@Input hCmdCompCallerHandle Used to prevent a handler from being
+ notified. A NULL value results in all
+ handlers being notified.
+*/ /***************************************************************************/
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
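+
+/*
+ * Illustrative sketch (hypothetical names, not part of the driver): a
+ * device module registers a command complete callback, signals the other
+ * handlers when its own work finishes, and unregisters on tear-down.
+ * MY_DEVICE and psDev are placeholders for the caller's own context.
+ *
+ *   static void DevCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hPriv)
+ *   {
+ *       MY_DEVICE *psDev = (MY_DEVICE *) hPriv;
+ *       ... re-check whether blocked work items can now proceed ...
+ *   }
+ *
+ *   IMG_HANDLE hNotify;
+ *   PVRSRVRegisterCmdCompleteNotify(&hNotify, DevCmdCompleteNotify, psDev);
+ *
+ *   ... some work completed on psDev: notify every handler but our own ...
+ *   PVRSRVCheckStatus((PVRSRV_CMDCOMP_HANDLE) psDev);
+ *
+ *   PVRSRVUnregisterCmdCompleteNotify(hNotify);
+ */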
+
+
+/**************************************************************************/ /*!
+Debug Notifier Interface
+*/ /***************************************************************************/
+
+#define DEBUG_REQUEST_DC 0
+#define DEBUG_REQUEST_SERVERSYNC 1
+#define DEBUG_REQUEST_SYS 2
+#define DEBUG_REQUEST_ANDROIDSYNC 3
+#define DEBUG_REQUEST_LINUXFENCE 4
+#define DEBUG_REQUEST_SYNCCHECKPOINT 5
+#define DEBUG_REQUEST_HTB 6
+#define DEBUG_REQUEST_APPHINT 7
+
+#define DEBUG_REQUEST_VERBOSITY_LOW 0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH 2
+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH
+
+/*
+ * Macro used within debug dump functions to send output either to PVR_LOG or
+ * a custom function. The custom function should be stored as a function pointer
+ * in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' is also
+ * required as a local variable to serve as a file identifier for the printf
+ * function if required.
+ */
+#define PVR_DUMPDEBUG_LOG(...) \
+ do \
+ { \
+ if (pfnDumpDebugPrintf) \
+ pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+ else \
+ PVR_LOG((__VA_ARGS__)); \
+ } while(0)
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...);
+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
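+
+/*
+ * Illustrative sketch (hypothetical names, not part of the driver): a
+ * callback matching PFN_DBGREQ_NOTIFY that routes its output through
+ * PVR_DUMPDEBUG_LOG. The parameter names pfnDumpDebugPrintf and
+ * pvDumpDebugFile are exactly what the macro expects to find in scope.
+ *
+ *   static void MyDbgRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+ *                                  IMG_UINT32 ui32VerbLevel,
+ *                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *                                  void *pvDumpDebugFile)
+ *   {
+ *       if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_LOW)
+ *       {
+ *           PVR_DUMPDEBUG_LOG("MyModule: state OK");
+ *       }
+ *   }
+ */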
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterDbgTable
+@Description Registers a debug requester table for the given device. The
+ order in which the debug requester IDs appear in the given
+ table determine the order in which a set of notifier callbacks
+ will be called. In other words, the requester ID that appears
+ first will have all of its associated debug notifier callbacks
+ called first. This will then be followed by all the callbacks
+ associated with the next requester ID in the table and so on.
+@Input psDevNode Device node with which to register requester table
+@Input paui32Table Array of requester IDs
+@Input ui32Length Number of elements in paui32Table
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterDbgTable
+@Description Unregisters a debug requester table.
+@Input psDevNode Device node for which the requester table should
+ be unregistered
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterDbgRequestNotify
+@Description Register a callback function that is called when a debug request
+ is made via a call PVRSRVDebugRequest. There are a number of
+ verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+ DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+ for each level up to the highest level specified to
+ PVRSRVDebugRequest.
+@Output phNotify On success, points to debug notifier handle
+@Input psDevNode Device node for which the debug callback
+ should be registered
+@Input pfnDbgRequestNotify Function callback
+@Input ui32RequesterID Requester ID. This is used to determine
+ the order in which callbacks are called
+@Input hDbgRequestHandle Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
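+
+/*
+ * Illustrative sketch (not part of the driver): the table below uses
+ * requester IDs defined in this header; its order fixes the order in
+ * which the registered callbacks are invoked at each verbosity level.
+ * MyDbgRequestNotify and psMyPrivateData are hypothetical, and 3 is
+ * simply the number of table entries.
+ *
+ *   static IMG_UINT32 aui32DebugOrder[] = {
+ *       DEBUG_REQUEST_SYS,
+ *       DEBUG_REQUEST_SERVERSYNC,
+ *       DEBUG_REQUEST_DC
+ *   };
+ *   IMG_HANDLE hDbgNotify;
+ *
+ *   PVRSRVRegisterDbgTable(psDevNode, aui32DebugOrder, 3);
+ *   PVRSRVRegisterDbgRequestNotify(&hDbgNotify, psDevNode,
+ *                                  MyDbgRequestNotify, DEBUG_REQUEST_SYS,
+ *                                  psMyPrivateData);
+ */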
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterDbgRequestNotify
+@Description Unregister a previously registered callback function.
+@Input hNotify Debug notifier handle.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDebugRequest
+@Description Notify any registered debug request handlers that a debug
+ request has been made and at what level.
+@Input psDevNode Device node for which the debug request has
+ been made
+@Input ui32VerbLevel The maximum verbosity level to dump
+@Input pfnDumpDebugPrintf Used to specify the print function that
+ should be used to dump any debug
+ information. If this argument is NULL then
+ PVR_LOG() will be used as the default print
+ function.
+@Input pvDumpDebugFile Optional file identifier to be passed to
+ the print function if required.
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
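+
+/*
+ * Illustrative call (not part of the driver): dump everything up to
+ * medium verbosity to the kernel log by passing NULL for the print
+ * function and the file identifier.
+ *
+ *   PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MEDIUM, NULL, NULL);
+ */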
+
+#endif /* !defined(__PVR_NOTIFIER_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services Transport Layer compatibility header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common types and definitions included into
+ both user mode and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ This header is provided to maintain compatibility with source files
+ outside the DDK that still include it; the header itself has been
+ moved/renamed to:
+ DDK/include/pvrsrv_tlcommon.h.
+ */
+
+#include "pvrsrv_tlcommon.h"
+
+/******************************************************************************
+ End of file (pvr_tlcommon.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Utility functions for user space access
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <asm/uaccess.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+ if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+ {
+ return __copy_to_user(pvTo, pvFrom, ulBytes);
+ }
+
+ return ulBytes;
+}
+
+
+#if defined(__KLOCWORK__)
+ /* this part is only to tell Klocwork not to report false positive because
+ it doesn't understand that pvr_copy_from_user will initialise the memory
+ pointed to by pvTo */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+ if (pvTo != NULL)
+ {
+ memset(pvTo, 0xAA, ulBytes);
+ return 0;
+ }
+ return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+ /*
+ * The compile time correctness checking introduced for copy_from_user in
+ * Linux 2.6.33 isn't fully compatible with our usage of the function.
+ */
+ if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+ {
+ return __copy_from_user(pvTo, pvFrom, ulBytes);
+ }
+
+ return ulBytes;
+}
+#endif /* klocworks */
+
+#endif /* __PVR_UACCESS_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@Title Module Author and License.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRMODULE_H_
+#define _PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif /* _PVRMODULE_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title core services functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for core services functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+#include "pvrsrv_vz.h"
+#endif
+#include "services_km.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "devicemem.h"
+#include "cache_km.h"
+
+#include "log2.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "sysvalidation.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#include "tlintern.h"
+#include "htbserver.h"
+
+#if defined (SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxfwutils.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ #if !defined(GPUVIRT_SIZEOF_ARENA0)
+ #define GPUVIRT_SIZEOF_ARENA0 (64 * 1024 * 1024) /* 64MB of LMA memory for arena 0, used for firmware and other early allocations */
+ #endif
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "rgx_options.h"
+#include "srvinit.h"
+#include "rgxutils.h"
+#endif
+
+#include "oskm_apphint.h"
+#include "pvrsrv_apphint.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+/*! Wait 100ms before retrying deferred clean-up again */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL
+
+/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times
+ * a day to check for any missed clean-up. */
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL
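+/* Both timeout values above are in microseconds and are passed directly to
+ * OSEventObjectWaitTimeout() by the cleanup thread. */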
+
+
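+/* Used when creating the per-process handle base hash table in
+ * PVRSRVDriverInit() */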
+#define PVRSRV_PROC_HANDLE_BASE_INIT 10
+
+static PVRSRV_DATA *gpsPVRSRVData = NULL;
+static IMG_UINT32 g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define INIT_DATA_ENABLE_PDUMPINIT 0x1U
+
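+/* Order in which debug request groups are registered for a device; passed
+ * to PVRSRVRegisterDbgTable() in PVRSRVDeviceCreate(). */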
+static IMG_UINT32 g_aui32DebugOrderTable[] = {
+ DEBUG_REQUEST_SYS,
+ DEBUG_REQUEST_APPHINT,
+ DEBUG_REQUEST_HTB,
+ DEBUG_REQUEST_DC,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ DEBUG_REQUEST_SERVERSYNC,
+ DEBUG_REQUEST_ANDROIDSYNC,
+ DEBUG_REQUEST_LINUXFENCE
+};
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
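+ * (or run immediately on the caller's context if the cleanup thread has
+ * already quit, e.g. because the driver is unloading).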
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+ PVRSRV_DATA *psPVRSRVData;
+ PVRSRV_ERROR eError;
+
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ PVR_ASSERT(psData != NULL);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if(psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload)
+#else
+ if(psPVRSRVData->bUnload)
+#endif
+ {
+ CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+ eError = pfnFree(psData->pvData);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+ "(callback " IMG_PFN_FMTSPEC "). "
+ "Immediate free will not be retried.",
+ pfnFree));
+ }
+ }
+ else
+ {
+ /* add this work item to the list */
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ /* signal the cleanup thread to ensure this item gets processed */
+ eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+}
+
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+ DLLIST_NODE *psNode;
+
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+ if(psNode != NULL)
+ {
+ dllist_remove_node(psNode);
+ }
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData,
+ IMG_BOOL *pbUseGlobalEO)
+{
+ DLLIST_NODE *psNodeIter, *psNodeLast;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bNeedRetry = IMG_FALSE;
+
+ /* Callbacks that return an error are moved to the back of the list,
+ * and new items may be added to the list at any time. To guarantee
+ * termination we therefore only iterate from the head of the list up
+ * to the tail captured at the start of this pass.
+ */
+
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ psNodeLast = psPVRSRVData->sCleanupThreadWorkList.psPrevNode;
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ do
+ {
+ PVRSRV_CLEANUP_THREAD_WORK *psData;
+
+ psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+ if(psNodeIter != NULL)
+ {
+ CLEANUP_THREAD_FN pfnFree;
+
+ psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+
+ /* get the function pointer address here so we have access to it
+ * in order to report the error in case of failure, without having
+ * to depend on psData not having been freed
+ */
+ pfnFree = psData->pfnFree;
+
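+ /* Work that depends on the hardware is retried against the driver's
+ * global event object rather than the cleanup event object; see the
+ * event selection at the top of CleanupThread(). */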
+ *pbUseGlobalEO = psData->bDependsOnHW;
+ eError = pfnFree(psData->pvData);
+
+ if(eError != PVRSRV_OK)
+ {
+ /* move to back of the list, if this item's
+ * retry count hasn't hit zero.
+ */
+ if(psData->ui32RetryCount-- > 0)
+ {
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+ bNeedRetry = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+ "(callback " IMG_PFN_FMTSPEC "). "
+ "Retry limit reached",
+ pfnFree));
+ }
+ }
+ }
+ } while((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+ return bNeedRetry;
+}
+
+/* #define CLEANUP_DPFL PVR_DBG_WARNING */
+#define CLEANUP_DPFL PVR_DBG_MESSAGE
+
+/* Create/initialise data required by the cleanup thread,
+ * before the cleanup thread is started
+ */
+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData)
+{
+ PVRSRV_ERROR eError;
+
+ /* Create the clean up event object */
+
+ eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Exit);
+
+ /* initialise the mutex and linked list required for the cleanup thread work list */
+
+ eError = OSLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Exit);
+
+ dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+Exit:
+ return eError;
+}
+
+static void CleanupThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ IMG_BOOL bRetryWorkList = IMG_FALSE;
+ IMG_HANDLE hGlobalEvent;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eRc;
+ IMG_BOOL bUseGlobalEO = IMG_FALSE;
+
+ /* Store the process id (pid) of the clean-up thread */
+ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+ /* Open an event on the clean up event object so we can listen on it,
+ * abort the clean up thread and driver if this fails.
+ */
+ eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+ PVR_ASSERT(eRc == PVRSRV_OK);
+
+ eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent);
+ PVR_ASSERT(eRc == PVRSRV_OK);
+
+ /* While the driver is in a good state and is not being unloaded
+ * try to free any deferred items when signalled
+ */
+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+ (!psPVRSRVData->bUnload))
+ {
+ IMG_HANDLE hEvent;
+
+ /* Wait until signalled for deferred clean up OR wait for a
+ * short period if the previous deferred clean up was not able
+ * to release all the resources before trying again.
+ * Bridge lock re-acquired on our behalf before the wait call returns.
+ */
+
+ if(bRetryWorkList && bUseGlobalEO)
+ {
+ hEvent = hGlobalEvent;
+ }
+ else
+ {
+ hEvent = hOSEvent;
+ }
+
+ eRc = OSEventObjectWaitTimeout(hEvent,
+ bRetryWorkList ?
+ CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+ CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+ if (eRc == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+ }
+ else if (eRc == PVRSRV_OK)
+ {
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc));
+ }
+
+ bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO);
+ }
+
+ OSLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ eRc = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+ eRc = OSEventObjectClose(hGlobalEvent);
+ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
+
+static IMG_BOOL DevicesWatchdogThread_Powered_Any(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ /* Power lock cannot be acquired at this time (sys power is off) */
+ return IMG_FALSE;
+ }
+
+ /* Any other error is unexpected so we assume the device is on */
+ PVR_DPF((PVR_DBG_ERROR,
+ "DevicesWatchdogThread: Failed to acquire power lock for device %p (%s)",
+ psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ return IMG_TRUE;
+ }
+
+ (void) PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON) ? IMG_TRUE : IMG_FALSE;
+}
+
+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+ va_list va)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus;
+ PVRSRV_ERROR eError;
+
+ pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
+
+ if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+ {
+ eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+ "Could not check for fatal error (%d)!",
+ eError));
+ }
+ }
+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+
+ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+ {
+ if (eHealthStatus != *pePreviousHealthStatus)
+ {
+ if (!(psDevInfo->ui32DeviceFlags &
+ RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+ "Device not responding!!!"));
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+ NULL, NULL);
+ }
+ }
+ }
+
+ *pePreviousHealthStatus = eHealthStatus;
+}
+
+static void DevicesWatchdogThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+ DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+ /* Open an event on the devices watchdog event object so we can listen on it
+ and abort the devices watchdog thread. */
+ eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+ PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+ /* Loop continuously checking the device status every few seconds. */
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+ !psPVRSRVData->bUnload)
+#else
+ while (!psPVRSRVData->bUnload)
+#endif
+ {
+ IMG_BOOL bPwrIsOn = IMG_FALSE;
+
+ /* Wait time between polls (done at the start of the loop to allow devices
+ to initialise) or for the event signal (shutdown or power on). */
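+ /* The timeout is converted from milliseconds to microseconds, as
+ expected by OSEventObjectWaitTimeout(). */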
+ eError = OSEventObjectWaitTimeout(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+ psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+ if (eError == PVRSRV_OK)
+ {
+ if (psPVRSRVData->bUnload)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+ break;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+ }
+ }
+ else if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ /* Ignore timeouts; report any other wait error. */
+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+ "Error (%d) when waiting for event!", eError));
+ }
+
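+ /* Poll at the shorter interval while any device is powered on (or a
+ power transition has been flagged), otherwise fall back to the
+ longer power-off interval. */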
+ bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_Powered_Any);
+ if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+ {
+ psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+ }
+ else
+ {
+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+ }
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_ForEachVaCb,
+ &ePreviousHealthStatus);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+ SysPrintAndResetFaultStatusRegister();
+#endif
+ }
+
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData()
+{
+ return gpsPVRSRVData;
+}
+
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDriverInit(void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData = NULL;
+
+ IMG_UINT32 ui32AppHintCleanupThreadPriority;
+ IMG_UINT32 ui32AppHintCleanupThreadWeight;
+ IMG_UINT32 ui32AppHintWatchdogThreadPriority;
+ IMG_UINT32 ui32AppHintWatchdogThreadWeight;
+
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault;
+
+ /*
+ * As this function performs one time driver initialisation, use the
+ * Services global device-independent data to determine whether or not
+ * this function has already been called.
+ */
+ if (gpsPVRSRVData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__));
+ return PVRSRV_ERROR_ALREADY_EXISTS;
+ }
+
+ eError = PhysHeapInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /*
+ * Allocate the device-independent data
+ */
+ psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData));
+ if (psPVRSRVData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto Error;
+ }
+
+ /* Point gpsPVRSRVData at the newly allocated data */
+ gpsPVRSRVData = psPVRSRVData;
+
+ /* Initialise any OS-specific data */
+ eError = OSInitEnvData();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ RIInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = DevicememHistoryInitKM();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to initialise DevicememHistoryInitKM", __func__));
+ goto Error;
+ }
+#endif
+
+ eError = BridgeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise bridge",
+ __func__));
+ goto Error;
+ }
+
+ eError = PMRInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = CacheOpInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DCInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+#endif
+
+ /* Initialise overall system state */
+ gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+ /* Create an event object */
+ eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+ gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+ eError = PVRSRVCmdCompleteInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /* Initialise pdump */
+ eError = PDUMPINIT();
+ if(eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
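+ /* Record that pdump was initialised so PVRSRVDriverDeInit() knows to
+ tear it down. */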
+ g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+ eError = PVRSRVHandleInit();
+ if(eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = _CleanupThreadPrepare(gpsPVRSRVData);
+ PVR_LOGG_IF_ERROR(eError, "_CleanupThreadPrepare", Error);
+
+ /* Create a thread which is used to do the deferred cleanup */
+ eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+ "pvr_defer_free",
+ CleanupThread,
+ gpsPVRSRVData,
+ OS_THREAD_LOWEST_PRIORITY);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create deferred cleanup thread",
+ __func__));
+ goto Error;
+ }
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY;
+ OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority,
+ &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority);
+ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADWEIGHT;
+ OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadWeight,
+ &ui32AppHintDefault, &ui32AppHintCleanupThreadWeight);
+ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY;
+ OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority,
+ &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
+ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT;
+ OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadWeight,
+ &ui32AppHintDefault, &ui32AppHintWatchdogThreadWeight);
+ OSFreeKMAppHintState(pvAppHintState);
+ pvAppHintState = NULL;
+
+ eError = OSSetThreadPriority(gpsPVRSRVData->hCleanupThread,
+ ui32AppHintCleanupThreadPriority,
+ ui32AppHintCleanupThreadWeight);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of deferred cleanup thread.",
+ __func__));
+ goto Error;
+ }
+
+ /* Create the devices watchdog event object */
+ eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+ /* Create a thread which is used to detect fatal errors */
+ eError = OSThreadCreate(&gpsPVRSRVData->hDevicesWatchdogThread,
+ "pvr_device_wdg",
+ DevicesWatchdogThread,
+ gpsPVRSRVData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create devices watchdog thread",
+ __func__));
+ goto Error;
+ }
+
+ eError = OSSetThreadPriority(gpsPVRSRVData->hDevicesWatchdogThread,
+ ui32AppHintWatchdogThreadPriority,
+ ui32AppHintWatchdogThreadWeight);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of the watchdog thread.",
+ __func__));
+ goto Error;
+ }
+
+ gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT);
+
+ if (gpsPVRSRVData->psProcessHandleBase_Table == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create hash table for process handle base.",
+ __func__));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ goto Error;
+ }
+
+ eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create lock for process handle base.",
+ __func__));
+ goto Error;
+ }
+
+ return PVRSRV_OK;
+
+Error:
+ PVRSRVDriverDeInit();
+ return eError;
+}
+
+void IMG_CALLCONV
+PVRSRVDriverDeInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (gpsPVRSRVData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data",
+ __func__));
+ return;
+ }
+
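+ /* Flag the unload so the cleanup and devices watchdog threads exit
+ their main loops. */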
+ gpsPVRSRVData->bUnload = IMG_TRUE;
+
+ if (gpsPVRSRVData->hProcessHandleBase_Lock)
+ {
+ OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock);
+ gpsPVRSRVData->hProcessHandleBase_Lock = NULL;
+ }
+
+ if (gpsPVRSRVData->psProcessHandleBase_Table)
+ {
+ HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table);
+ gpsPVRSRVData->psProcessHandleBase_Table = NULL;
+ }
+
+ if (gpsPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject);
+ }
+
+ /* Stop and cleanup the devices watchdog thread */
+ if (gpsPVRSRVData->hDevicesWatchdogThread)
+ {
+ if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+ {
+ eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+ if (PVRSRV_OK == eError)
+ {
+ gpsPVRSRVData->hDevicesWatchdogThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ }
+
+ if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+ {
+ eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+ gpsPVRSRVData->hDevicesWatchdogEvObj = NULL;
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ }
+
+ /* Stop and cleanup the deferred clean up thread, event object and
+ * deferred context list.
+ */
+ if (gpsPVRSRVData->hCleanupThread)
+ {
+ if (gpsPVRSRVData->hCleanupEventObject)
+ {
+ eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+ if (PVRSRV_OK == eError)
+ {
+ gpsPVRSRVData->hCleanupThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ }
+
+ if (gpsPVRSRVData->hCleanupEventObject)
+ {
+ eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+ gpsPVRSRVData->hCleanupEventObject = NULL;
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ }
+
+ /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */
+ /* HTB De-init happens in device de-registration currently */
+ eError = HTBDeInit();
+ PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+ eError = PVRSRVHandleDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVHandleDeInit failed", __func__));
+ }
+
+ /* deinitialise pdump */
+ if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+ {
+ PDUMPDEINIT();
+ }
+
+ /* destroy event object */
+ if (gpsPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+ gpsPVRSRVData->hGlobalEventObject = NULL;
+ }
+
+ PVRSRVCmdCompleteDeinit();
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DCDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: DCDeInit failed", __func__));
+ }
+#endif
+
+ eError = CacheOpDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: CacheOpDeInit failed", __func__));
+ }
+
+ eError = PMRDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMRDeInit failed", __func__));
+ }
+
+ BridgeDeinit();
+
+#if defined(PVR_RI_DEBUG)
+ RIDeInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ DevicememHistoryDeInitKM();
+#endif
+
+ OSDeInitEnvData();
+
+ eError = PhysHeapDeinit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PhysHeapDeinit failed", __func__));
+ }
+
+ OSFreeMem(gpsPVRSRVData);
+ gpsPVRSRVData = NULL;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT uiCounter=0;
+
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ psDeviceNode->psOSidSubArena[uiCounter] =
+ RA_Create(psDeviceNode->apszRANames[0],
+ OSGetPageShift(), /* Use host page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+
+ if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"\n(GPU Virtualization Validation): Calling RA_Add with base %u and size %u \n",0, GPUVIRT_SIZEOF_ARENA0));
+
+ /* The arenas are created before the client side reads the apphints and transfers them over the bridge, so at
+ * this point we do not know how the memory will be partitioned. Since some memory is already needed for the
+ * initial allocations, sub-arena 0 is populated with a 64MB span up front. This has proven sufficient even when
+ * EWS is allocated from this sub-arena and a multi-app example is then run. The pre-allocation also means the
+ * apphints must stay consistent with it: the OSid0 region must start at 0 and end at 0x3FFFFFF
+ * (64MB = 0x4000000 bytes). */
+
+ if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0 , NULL ))
+ {
+ RA_Delete(psDeviceNode->psOSidSubArena[0]);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psDeviceNode->apsLocalDevMemArenas[0] = psDeviceNode->psOSidSubArena[0];
+
+ return PVRSRV_OK;
+}
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS])
+{
+ IMG_UINT uiCounter;
+
+ /* Sub-arena 0 has already been populated, so populate the rest starting from 1 */
+
+ for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"\n[GPU Virtualization Validation]: Calling RA_Add with base %u and size %u \n",aui32OSidMin[uiCounter][0], aui32OSidMax[uiCounter][0]-aui32OSidMin[uiCounter][0]+1));
+
+ if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], aui32OSidMin[uiCounter][0], aui32OSidMax[uiCounter][0]-aui32OSidMin[uiCounter][0]+1, 0, NULL))
+ {
+ goto error;
+ }
+ }
+
+ #if defined(EMULATOR)
+ {
+ SysSetOSidRegisters(aui32OSidMin, aui32OSidMax);
+ }
+ #endif
+
+ return;
+
+error:
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+ }
+
+ return;
+}
+
+#endif
+
+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ /* Only dump info once */
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_LOW)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode =
+ (PVRSRV_DEVICE_NODE *) hDebugRequestHandle;
+
+ switch (psDeviceNode->eCurrentSysPowerState)
+ {
+ case PVRSRV_SYS_POWER_STATE_OFF:
+ PVR_DUMPDEBUG_LOG("Device System Power State: OFF");
+ break;
+ case PVRSRV_SYS_POWER_STATE_ON:
+ PVR_DUMPDEBUG_LOG("Device System Power State: ON");
+ break;
+ default:
+ PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)",
+ psDeviceNode->eCurrentSysPowerState);
+ break;
+ }
+
+ SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceCreate(void *pvOSDevice,
+ PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_PHYS_HEAP physHeapIndex;
+ IMG_UINT32 i;
+
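+ /* Without kernel-mode server initialisation only a single device is
+ supported, so reject any further registrations. */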
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ if (0 != psPVRSRVData->ui32RegisteredDevices)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+#endif
+ psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+ if (!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate device node",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = SysDevInit(pvOSDevice, &psDevConfig);
+ if (eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get device config (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto e0;
+ }
+
+ PVR_ASSERT(psDevConfig);
+ PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice);
+ PVR_ASSERT(!psDevConfig->psDevNode);
+
+ /* Store the device node in the device config for the system layer to use */
+ psDevConfig->psDevNode = psDeviceNode;
+
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+ psDeviceNode->psDevConfig = psDevConfig;
+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+ eError = PVRSRVRegisterDbgTable(psDeviceNode,
+ g_aui32DebugOrderTable,
+ IMG_ARR_NUM_ELEMS(g_aui32DebugOrderTable));
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ eError = OSLockCreate(&psDeviceNode->hPowerLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /* Register the physical memory heaps */
+ psDeviceNode->papsRegisteredPhysHeaps =
+ OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+ psDevConfig->ui32PhysHeapCount);
+ if (!psDeviceNode->papsRegisteredPhysHeaps)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e3;
+ }
+
+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+ {
+ eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[i],
+ &psDeviceNode->papsRegisteredPhysHeaps[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to register physical heap %d (%s)",
+ __func__, psDevConfig->pasPhysHeaps[i].ui32PhysHeapID,
+ PVRSRVGetErrorStringKM(eError)));
+ goto e4;
+ }
+
+ psDeviceNode->ui32RegisteredPhysHeaps++;
+ }
+
+ /*
+ * The physical backing storage for the following physical heaps
+ * [CPU,GPU,FW] may or may not come from the same underlying source
+ */
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL physical memory heap",
+ __func__));
+ goto e4;
+ }
+
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL physical memory heap",
+ __func__));
+ goto e5;
+ }
+
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL physical memory heap",
+ __func__));
+ goto e5;
+ }
+
+ /* Do we have card memory? If so create RAs to manage it */
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ RA_BASE_T uBase;
+ RA_LENGTH_T uSize;
+ IMG_UINT64 ui64Size;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+
+ IMG_UINT32 ui32NumOfLMARegions;
+ IMG_UINT32 ui32RegionId;
+ PHYS_HEAP* psLMAHeap;
+
+ psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ ui32NumOfLMARegions = PhysHeapNumberOfRegions(psLMAHeap);
+
+ if (ui32NumOfLMARegions == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: LMA heap has no memory regions defined.", __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP;
+ goto e5;
+ }
+
+ /* Allocate memory for RA pointers and name strings */
+ psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*) * ui32NumOfLMARegions);
+ psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfLMARegions;
+ psDeviceNode->apszRANames = OSAllocMem(ui32NumOfLMARegions * sizeof(IMG_PCHAR));
+
+ for (ui32RegionId = 0; ui32RegionId < ui32NumOfLMARegions; ui32RegionId++)
+ {
+ eError = PhysHeapRegionGetSize(psLMAHeap, ui32RegionId, &ui64Size);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ eError = PhysHeapRegionGetCpuPAddr(psLMAHeap, ui32RegionId, &sCpuPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ eError = PhysHeapRegionGetDevPAddr(psLMAHeap, ui32RegionId, &sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Creating RA for card memory - region %d - 0x%016llx-0x%016llx",
+ ui32RegionId, (IMG_UINT64) sCpuPAddr.uiAddr,
+ sCpuPAddr.uiAddr + ui64Size));
+
+ psDeviceNode->apszRANames[ui32RegionId] =
+ OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+ OSSNPrintf(psDeviceNode->apszRANames[ui32RegionId],
+ PVRSRV_MAX_RA_NAME_LENGTH,
+ "%s card mem",
+ psDevConfig->pszName);
+
+ uBase = sDevPAddr.uiAddr;
+ uSize = (RA_LENGTH_T) ui64Size;
+ PVR_ASSERT(uSize == ui64Size);
+
+ /* Use host page size, keeps things simple */
+ psDeviceNode->apsLocalDevMemArenas[ui32RegionId] =
+ RA_Create(psDeviceNode->apszRANames[ui32RegionId],
+ OSGetPageShift(), RA_LOCKCLASS_0, NULL, NULL, NULL,
+ IMG_FALSE);
+
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId] == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create LMA memory arena",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e6;
+ }
+
+ if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[ui32RegionId],
+ uBase, uSize, 0, NULL))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to add memory to LMA memory arena",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e6;
+ }
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ eError = CreateLMASubArenas(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create LMA memory sub-arenas", __func__));
+ goto e6;
+ }
+#endif
+
+ /* If additional psDeviceNode->pfnDevPx* callbacks are added,
+ update the corresponding virtualization-specific override
+ in pvrsrv_vz.c:PVRSRVVzDeviceCreate() */
+ psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+ psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+ psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+ psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+ psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory only, no local card memory"));
+
+ /* else we only have OS system memory */
+ psDeviceNode->pfnDevPxAlloc = OSPhyContigPagesAlloc;
+ psDeviceNode->pfnDevPxFree = OSPhyContigPagesFree;
+ psDeviceNode->pfnDevPxMap = OSPhyContigPagesMap;
+ psDeviceNode->pfnDevPxUnMap = OSPhyContigPagesUnmap;
+ psDeviceNode->pfnDevPxClean = OSPhyContigPagesClean;
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 2nd phys heap"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 3rd phys heap"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
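+ /* MMU page table (Px) allocations use host page granularity */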
+ psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+ eError = ServerSyncInit(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto e6;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Perform virtualization device creation */
+ PVRSRVVzDeviceCreate(psDeviceNode);
+#endif
+
+ /*
+ * This is registered before doing device specific initialisation to ensure
+ * generic device information is dumped first during a debug request.
+ */
+ eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify,
+ psDeviceNode,
+ _SysDebugRequestNotify,
+ DEBUG_REQUEST_SYS,
+ psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ eError = HTBDeviceCreate(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "HTBDeviceCreate");
+
+ psPVRSRVData->ui32RegisteredDevices++;
+
+#if defined(SUPPORT_RGX)
+ eError = RGXRegisterDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register device", __func__));
+ eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+ goto e7;
+ }
+#endif
+
+#if defined(PVR_DVFS)
+ eError = InitDVFS(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to start DVFS", __func__));
+#if defined(SUPPORT_RGX)
+ DevDeInitRGX(psDeviceNode);
+#endif
+ goto e7;
+ }
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+ TUtilsInit(psDeviceNode);
+#endif
+
+ dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead);
+
+ /*
+ * Initialise the Transport Layer.
+ * Need to remember the RGX device node for use in the Transport Layer
+ * when allocating stream buffers that are shared with clients.
+ * Note however when the device is an LMA device our buffers will not
+ * be in host memory but card memory.
+ */
+ eError = TLInit(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "TLInit");
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode));
+ PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx",
+ (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+ PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+
+ /* Finally insert the device into the dev-list and set it as active */
+ List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
+ psDeviceNode);
+
+ *ppsDeviceNode = psDeviceNode;
+
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_RGX) || defined(PVR_DVFS)
+e7:
+ psPVRSRVData->ui32RegisteredDevices--;
+
+ if (psDeviceNode->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Perform virtualization device destruction */
+ PVRSRVVzDeviceDestroy(psDeviceNode);
+#endif
+ ServerSyncDeinit(psDeviceNode);
+#endif
+e6:
+ {
+ IMG_UINT32 ui32RegionId;
+
+ for (ui32RegionId = 0;
+ ui32RegionId < psDeviceNode->ui32NumOfLocalMemArenas;
+ ui32RegionId++)
+ {
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId])
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionId]);
+ }
+ }
+ }
+
+e5:
+ for (physHeapIndex = 0;
+ physHeapIndex < IMG_ARR_NUM_ELEMS(psDeviceNode->apsPhysHeap);
+ physHeapIndex++)
+ {
+ if (psDeviceNode->apsPhysHeap[physHeapIndex])
+ {
+ PhysHeapRelease(psDeviceNode->apsPhysHeap[physHeapIndex]);
+ }
+ }
+e4:
+ for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+ {
+ PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+ }
+
+ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+e3:
+ OSLockDestroy(psDeviceNode->hPowerLock);
+e2:
+ PVRSRVUnregisterDbgTable(psDeviceNode);
+e1:
+ psDevConfig->psDevNode = NULL;
+ SysDevDeInit(psDevConfig);
+e0:
+ OSFreeMem(psDeviceNode);
+ return eError;
+}
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL bValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ ui32Flag, bValue);
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL *pbValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+ IMG_UINT32 ui32State;
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ &ui32State);
+
+ if (PVRSRV_OK == eResult)
+ {
+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+ }
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL bValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* EnableHWR is a special case: once the firmware is running it can
+ * only be disabled, not enabled.
+ */
+ if (bValue && RGXFWIF_INICFG_HWR_EN == ui32Flag)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ ui32Flag, NULL, bValue);
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL *pbValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+ IMG_UINT32 ui32State;
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ 0, &ui32State, IMG_FALSE);
+
+ if (PVRSRV_OK == eResult)
+ {
+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+ }
+
+ return eResult;
+}
+
+PVRSRV_ERROR PVRSRVDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_BOOL bInitSuccessful = IMG_FALSE;
+ PVRSRV_ERROR eError;
+
+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+#if defined(SUPPORT_RGX)
+ eError = RGXInit(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Initialisation of Rogue device failed (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto Exit;
+ }
+#endif
+
+ bInitSuccessful = IMG_TRUE;
+
+#if defined(SUPPORT_RGX)
+Exit:
+#endif
+ eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Services failed to finalise the device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableHWR,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_HWR_EN));
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_ZERO_FREELIST));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DustRequestInject,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN));
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic,
+ RGXQueryPdumpPanicEnable, RGXSetPdumpPanicEnable,
+ psDeviceNode,
+ NULL);
+ return eError;
+}
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) */
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+ IMG_UINT32 ui32RegionIdx;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ IMG_BOOL bForceUnload = IMG_FALSE;
+
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bForceUnload = IMG_TRUE;
+ }
+#endif
+
+ psPVRSRVData->ui32RegisteredDevices--;
+
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+#if defined(PVR_TESTING_UTILS)
+ TUtilsDeinit(psDeviceNode);
+#endif
+
+ /* Counterpart to what is done in PVRSRVDeviceFinalise() */
+ if (psDeviceNode->hSyncPrimContext)
+ {
+ if (psDeviceNode->psSyncPrim)
+ {
+ /* Free the general purpose sync primitive */
+ SyncPrimFree(psDeviceNode->psSyncPrim);
+ psDeviceNode->psSyncPrim = NULL;
+ }
+
+ if (psDeviceNode->psMMUCacheSyncPrim)
+ {
+ PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim;
+
+ /* This sync is used to defer MMU page table freeing, so the
+ * device node pointer must be cleared before the sync-prim is
+ * freed; otherwise freeing the sync-prim's own page tables
+ * would itself be deferred. */
+ psDeviceNode->psMMUCacheSyncPrim = NULL;
+
+ /* Free the MMU cache invalidate sync primitive */
+ SyncPrimFree(psSync);
+
+ }
+
+ SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+ psDeviceNode->hSyncPrimContext = NULL;
+ }
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock", __func__));
+ return eError;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bForceUnload)
+ {
+ /*
+ * Firmware probably not responding but we still want to unload the
+ * driver.
+ */
+ break;
+ }
+#endif
+ /* Force idle device */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock",
+ __func__));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Power down the device if necessary */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ IMG_TRUE);
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+ /*
+ * If the driver is okay then return the error, otherwise we can ignore
+ * this error.
+ */
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+ {
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Will continue to unregister as driver status is not OK",
+ __func__));
+ }
+ }
+
+#if defined(SUPPORT_RGX)
+ DevDeInitRGX(psDeviceNode);
+#endif
+
+ HTBDeviceDestroy(psDeviceNode);
+
+ if (psDeviceNode->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Perform virtualization device destruction */
+ PVRSRVVzDeviceDestroy(psDeviceNode);
+#endif
+
+ ServerSyncDeinit(psDeviceNode);
+
+ /* Remove RAs and RA names for local card memory */
+ for (ui32RegionIdx = 0;
+ ui32RegionIdx < psDeviceNode->ui32NumOfLocalMemArenas;
+ ui32RegionIdx++)
+ {
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx])
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]);
+ }
+
+ if (psDeviceNode->apszRANames[ui32RegionIdx])
+ {
+ OSFreeMem(psDeviceNode->apszRANames[ui32RegionIdx]);
+ }
+ }
+
+ OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+ OSFreeMem(psDeviceNode->apszRANames);
+
+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+ for (ePhysHeapIdx = 0;
+ ePhysHeapIdx < IMG_ARR_NUM_ELEMS(psDeviceNode->apsPhysHeap);
+ ePhysHeapIdx++)
+ {
+ if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+ {
+ PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+ }
+ }
+
+ for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+ {
+ PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+ }
+
+ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+
+#if defined(PVR_DVFS)
+ DeinitDVFS(psDeviceNode);
+#endif
+
+ OSLockDestroy(psDeviceNode->hPowerLock);
+
+ PVRSRVUnregisterDbgTable(psDeviceNode);
+
+ psDeviceNode->psDevConfig->psDevNode = NULL;
+ SysDevDeInit(psDeviceNode->psDevConfig);
+
+ /*
+ * Clean up Transport Layer resources that remain. Done after RGX node clean
+ * up as HWPerf stream is destroyed during this.
+ */
+ TLDeInit(psDeviceNode);
+
+ OSFreeMem(psDeviceNode);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid = 0;
+#endif
+ RA_BASE_T uiCardAddr;
+ RA_LENGTH_T uiActualSize;
+ PVRSRV_ERROR eError;
+
+ RA_ARENA *pArena=psDevNode->apsLocalDevMemArenas[0];
+ IMG_UINT32 ui32Log2NumPages = 0;
+
+ PVR_ASSERT(uiSize != 0);
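+ /* Round the request up to a power-of-two number of host pages; the
+ * order is stored in the handle so the map/unmap paths can recover
+ * the allocation size. */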
+ ui32Log2NumPages = OSGetOrder(uiSize);
+ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProt;
+
+ IMG_PID pId = OSGetCurrentClientProcessIDKM();
+
+ RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ pArena = psDevNode->psOSidSubArena[ui32OSid];
+}
+#endif
+
+ eError = RA_Alloc(pArena,
+ uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ OSGetPageSize(),
+ "LMA_PhyContigPagesAlloc",
+ &uiCardAddr,
+ &uiActualSize,
+ NULL); /* No private handle */
+
+ PVR_ASSERT(uiSize == uiActualSize);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): LMA_PhyContigPagesAlloc: Address:%llu, size:%llu", uiCardAddr,uiActualSize));
+}
+#endif
+
+ psMemHandle->u.ui64Handle = uiCardAddr;
+ psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+ if (PVRSRV_OK == eError)
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ uiSize,
+ (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ NULL,
+ sCpuPAddr,
+ uiSize,
+ NULL);
+#endif
+#endif
+ psMemHandle->ui32Order = ui32Log2NumPages;
+ }
+
+ return eError;
+}
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+ RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT64)uiCardAddr);
+#endif
+#endif
+ RA_Free(psDevNode->apsLocalDevMemArenas[0], uiCardAddr);
+ psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr)
+{
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+
+ PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+ *pvPtr = OSMapPhysToLin(sCpuPAddr,
+ ui32NumPages * OSGetPageSize(),
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE);
+ if (*pvPtr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ui32NumPages * OSGetPageSize());
+#else
+ {
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+ *pvPtr,
+ sCpuPAddr,
+ ui32NumPages * OSGetPageSize(),
+ NULL);
+ }
+#endif
+#endif
+ return PVRSRV_OK;
+ }
+}
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ void *pvPtr)
+{
+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ui32NumPages * OSGetPageSize());
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, (IMG_UINT64)(uintptr_t)pvPtr);
+#endif
+#endif
+
+ OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize(),
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength)
+{
+ /* No need to flush because we map as uncached */
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psMemHandle);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiLength);
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVDeviceFinalise
+@Description Performs the final parts of device initialisation.
+@Input psDeviceNode Device node of the device to finish
+ initialising
+@Input bInitSuccessful Whether or not device specific
+ initialisation was successful
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bInitSuccessful)
+{
+ PVRSRV_ERROR eError;
+
+ if (bInitSuccessful)
+ {
+ eError = SyncPrimContextCreate(psDeviceNode,
+ &psDeviceNode->hSyncPrimContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create sync prim context (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /* Allocate general purpose sync primitive */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psDeviceNode->psSyncPrim,
+ "pvrsrv dev general");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate sync primitive with error (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /* Allocate MMU cache invalidate sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psDeviceNode->psMMUCacheSyncPrim,
+ "pvrsrv dev MMU cache");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate sync primitive with error (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /* Next update value will be 1 since sync prim starts with 0 */
+ psDeviceNode->ui32NextMMUInvalidateUpdate = 1;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /*
+ * Always ensure a single power on command appears in the pdump. This
+ * should be the only power related call outside of PDUMPPOWCMDSTART
+ * and PDUMPPOWCMDEND.
+ */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to set device %p power state to 'on' (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+
+ /* Verify firmware compatibility for device */
+ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed compatibility check for device %p (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ goto ErrorExit;
+ }
+
+ PDUMPPOWCMDSTART();
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ /* Force the device to idle if its default power state is off */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+ &PVRSRVDeviceIsDefaultStateOFF,
+ IMG_TRUE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire power lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to idle device %p (%s)",
+ __func__, psDeviceNode,
+ PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Place device into its default power state. */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_DEFAULT,
+ IMG_TRUE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to set device %p into its default power state (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ /*
+ * If PDUMP is enabled and RGX device is supported, then initialise the
+ * performance counters that can be further modified in PDUMP. Then,
+ * before ending the init phase of the pdump, drain the commands put in
+ * the kCCB during the init phase.
+ */
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo =
+ (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+ eError = PVRSRVRGXInitHWPerfCountersKM(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to init hwperf counters (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ eError = RGXPdumpDrainKCCB(psDevInfo,
+ psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Problem draining kCCB (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ }
+#endif
+
+ /* Now that the device(s) are fully initialised set them as active */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+
+#if defined(SUPPORT_RGX) && defined(PVRSRV_GPUVIRT_GUESTDRV)
+ eError = RGXFWOSConfig((PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot kick initialization configuration to the Device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ goto ErrorExit;
+ }
+#endif
+ }
+ else
+ {
+ /* Initialisation failed so set the device(s) into a bad state */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+ eError = PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ /* Give PDump control a chance to end the init phase, depends on OS */
+ PDumpStopInitPhase(IMG_FALSE, IMG_TRUE);
+
+ return eError;
+
+ErrorExit:
+ /* Initialisation failed so set the device(s) into a bad state */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+
+ return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* Only check devices which specify a compatibility check callback */
+ if (psDeviceNode->pfnInitDeviceCompatCheck)
+ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+ else
+ return PVRSRV_OK;
+}
+
+/*
+ PollForValueKM
+*/
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Timeoutus,
+ IMG_UINT32 ui32PollPeriodus,
+ IMG_BOOL bAllowPreemption)
+{
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+ PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+ PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+ return PVRSRV_OK;
+#else
+ IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+ if (bAllowPreemption)
+ {
+ PVR_ASSERT(ui32PollPeriodus >= 1000);
+ }
+
+ LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+ {
+ ui32ActualValue = OSReadHWReg32((void *)pui32LinMemAddr, 0) & ui32Mask;
+
+ if(ui32ActualValue == ui32Value)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ if (bAllowPreemption)
+ {
+ OSSleepms(ui32PollPeriodus / 1000);
+ }
+ else
+ {
+ OSWaitus(ui32PollPeriodus);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+ ui32Value, ui32ActualValue, ui32Mask));
+
+ return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+ PVRSRVPollForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ return PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE);
+}
+
+static
+PVRSRV_ERROR IMG_CALLCONV WaitForValueKM(volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_BOOL bHoldBridgeLock)
+{
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ return PVRSRV_OK;
+#else
+
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eErrorWait;
+ IMG_UINT32 ui32ActualValue;
+
+ eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWaitForValueKM: Failed to setup EventObject with error (%d)", eError));
+ goto EventObjectOpenError;
+ }
+
+ eError = PVRSRV_ERROR_TIMEOUT;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
+
+ if (ui32ActualValue == ui32Value)
+ {
+ /* Expected value has been found */
+ eError = PVRSRV_OK;
+ break;
+ }
+ else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ /* Services in bad state, don't wait any more */
+ eError = PVRSRV_ERROR_NOT_READY;
+ break;
+ }
+ else
+ {
+ /* wait for event and retry */
+ eErrorWait = bHoldBridgeLock ? OSEventObjectWaitAndHoldBridgeLock(hOSEvent) : OSEventObjectWait(hOSEvent);
+ if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"PVRSRVWaitForValueKM: Waiting for value failed with error %d. Expected 0x%x but found 0x%x (Mask 0x%08x). Retrying",
+ eErrorWait,
+ ui32Value,
+ ui32ActualValue,
+ ui32Mask));
+ }
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ OSEventObjectClose(hOSEvent);
+
+	/* One last check in case the object wait ended after the loop timeout... */
+ if (eError != PVRSRV_OK && (*pui32LinMemAddr & ui32Mask) == ui32Value)
+ {
+ eError = PVRSRV_OK;
+ }
+
+ /* Provide event timeout information to aid the Device Watchdog Thread... */
+ if (eError == PVRSRV_OK)
+ {
+ psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+ }
+
+EventObjectOpenError:
+
+ return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+/*
+ PVRSRVWaitForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM (volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+	/* In this case we do NOT hold the bridge lock while
+	   waiting for the value. */
+ return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_FALSE);
+}
+
+/*
+ PVRSRVWaitForValueKMAndHoldBridgeLock
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_TRUE);
+}
+
+int PVRSRVGetDriverStatus(void)
+{
+ return PVRSRVGetPVRSRVData()->eServicesState;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function PVRSRVGetErrorStringKM
+
+ @Description Returns a text string relating to the PVRSRV_ERROR enum.
+
+ @Note A case statement is used rather than an indexed array to ensure the
+ text stays synchronised with the correct enum
+
+ @Input eError : PVRSRV_ERROR enum
+
+ @Return const IMG_CHAR * : Text string
+
+ @Note Must be kept in sync with servicesext.h
+
+******************************************************************************/
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError)
+{
+ switch(eError)
+ {
+ case PVRSRV_OK:
+ return "PVRSRV_OK";
+#define PVRE(x) \
+ case x: \
+ return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+ default:
+ return "Unknown PVRSRV error number";
+ }
+}
+
+/*
+ PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE)
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) ||
+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) ||
+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ return psDevConfig->bHasNonMappableLocalMemory;
+}
+
+/*
+ PVRSRVSystemWaitCycles
+*/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+ /* Delay in us */
+ IMG_UINT32 ui32Delayus = 1;
+
+ /* obtain the device freq */
+ if (psDevConfig->pfnClockFreqGet != NULL)
+ {
+ IMG_UINT32 ui32DeviceFreq;
+
+ ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+ ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq;
+
+ if (ui32Delayus == 0)
+ {
+ ui32Delayus = 1;
+ }
+ }
+
+ OSWaitus(ui32Delayus);
+}
+
+static void *
+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+ va_list va)
+{
+ void *pvOSDevice = va_arg(va, void *);
+
+ if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice)
+ {
+ return psDeviceNode;
+ }
+
+ return NULL;
+}
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ psDeviceNode =
+ List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb,
+ pvOSDevice);
+ if (!psDeviceNode)
+ {
+ /* Device can't be found in the list so it isn't in the system */
+ PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present",
+ __func__, pvOSDevice, ui32IRQ));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ,
+ pszName, pfnLISR, pvData, phLISRData);
+}
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ return SysUninstallDeviceLISR(hLISRData);
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 uiHeapNum,
+ IMG_UINT32 *puiXStride)
+{
+ PVR_ASSERT(puiXStride != NULL);
+
+ if (uiHeapNum < 1 || uiHeapNum > psDevConfig->ui32BIFTilingHeapCount)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *puiXStride = psDevConfig->pui32BIFTilingHeapConfigs[uiHeapNum - 1];
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+ IMG_UINT32 *puiNumHeaps)
+{
+ *peBifTilingMode = psDevConfig->eBIFTilingMode;
+ *puiNumHeaps = psDevConfig->ui32BIFTilingHeapCount;
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+{
+ SysSetAxiProtOSid(ui32OSid, bState);
+ return ;
+}
+
+void SetTrustedDeviceAceEnabled(void)
+{
+ SysSetTrustedDeviceAceEnabled();
+
+ return ;
+}
+#endif
+
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PowerVR services server header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "device.h"
+#include "power.h"
+#include "sysinfo.h"
+#include "physheap.h"
+#include "cache_ops.h"
+#include "pvr_notifier.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "connection_server.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*!
+ * For OSThreadDestroy(), which may require a retry.
+ * Try for 100 ms to destroy an OS thread before failing.
+ */
+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL
+#define OS_THREAD_DESTROY_RETRY_COUNT 10
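+
+/* Illustrative sketch only (not part of the original patch): one way a caller
+ * could combine the two constants above when tearing down a Services thread.
+ * The OSThreadDestroy()/OSWaitus() prototypes and the LOOP_UNTIL_TIMEOUT()
+ * helpers are assumed to be provided by the OS abstraction headers; their
+ * exact signatures are not defined in this file.
+ */
+#if 0 /* example only, never built */
+static void ExampleDestroyThread(IMG_HANDLE hThread)
+{
+    PVRSRV_ERROR eError;
+
+    LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+    {
+        eError = OSThreadDestroy(hThread);
+        if (eError == PVRSRV_OK)
+        {
+            break;
+        }
+        /* Spread the retries evenly across the 100 ms budget */
+        OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US / OS_THREAD_DESTROY_RETRY_COUNT);
+    } END_LOOP_UNTIL_TIMEOUT();
+}
+#endif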
+
+typedef struct _BUILD_INFO_
+{
+ IMG_UINT32 ui32BuildOptions;
+ IMG_UINT32 ui32BuildVersion;
+ IMG_UINT32 ui32BuildRevision;
+ IMG_UINT32 ui32BuildType;
+#define BUILD_TYPE_DEBUG 0
+#define BUILD_TYPE_RELEASE 1
+	/* The above fields are self-explanatory */
+ /* B.V.N.C can be added later if required */
+} BUILD_INFO;
+
+typedef struct _DRIVER_INFO_
+{
+ BUILD_INFO sUMBuildInfo;
+ BUILD_INFO sKMBuildInfo;
+ IMG_BOOL bIsNoMatch;
+} DRIVER_INFO;
+
+typedef struct PVRSRV_DATA_TAG
+{
+ DRIVER_INFO sDriverInfo;
+ IMG_UINT32 ui32RegisteredDevices;
+ PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */
+
+ PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */
+
+ HASH_TABLE *psProcessHandleBase_Table; /*!< Hash table with process handle bases */
+ POS_LOCK hProcessHandleBase_Lock; /*!< Lock for the process handle base table */
+
+ IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */
+ IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */
+
+ PVRSRV_CACHE_OP uiCacheOp; /*!< Pending cache operations in the system */
+#if (CACHEFLUSH_KM_TYPE == CACHEFLUSH_KM_RANGEBASED_DEFERRED)
+ IMG_HANDLE hCacheOpThread; /*!< CacheOp thread */
+ IMG_HANDLE hCacheOpThreadEventObject; /*!< Event object to drive CacheOp thread */
+ IMG_HANDLE hCacheOpUpdateEventObject; /*!< Update event object to drive CacheOp fencing */
+ POS_LOCK hCacheOpThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */
+ DLLIST_NODE sCacheOpThreadWorkList; /*!< List of work for the cleanup thread */
+ IMG_PID CacheOpThreadPid; /*!< CacheOp thread process id */
+#endif
+
+ IMG_HANDLE hCleanupThread; /*!< Cleanup thread */
+ IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */
+ POS_LOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */
+ DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */
+ IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */
+
+ IMG_HANDLE hDevicesWatchdogThread; /*!< Devices Watchdog thread */
+	IMG_HANDLE hDevicesWatchdogEvObj; /*!< Event object to drive devices watchdog thread */
+	volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans; /*!< Number of off -> on power state transitions */
+	volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*!< Timeout for the Devices Watchdog Thread */
+#ifdef PVR_TESTING_UTILS
+ volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */
+#endif
+
+#ifdef SUPPORT_PVRSRV_GPUVIRT
+	IMG_HANDLE hVzData; /*!< Additional virtualization data */
+#endif
+
+ IMG_BOOL bUnload; /*!< Driver unload is in progress */
+} PVRSRV_DATA;
+
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetPVRSRVData
+
+ @Description Get a pointer to the global data
+
+ @Return PVRSRV_DATA *
+
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
+
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ void *pvPtr);
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
+
+
+/*!
+******************************************************************************
+ @Function PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+******************************************************************************
+ @Function PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM(volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+******************************************************************************
+ @Function PVRSRVWaitForValueKMAndHoldBridgeLockKM
+
+ @Description
+ Waits without releasing bridge lock (using EventObjects) for a value
+ to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemHasCacheSnooping
+
+ @Description : Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemSnoopingOfCPUCache
+
+ @Description : Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description : Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemHasNonMappableLocalMemory
+
+ @Description : Returns whether the device has non-mappable part of local memory
+
+ @Return : IMG_TRUE if the device has non-mappable part of local memory
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemWaitCycles
+
+ @Description : Waits for at least ui32Cycles of the Device clk.
+
+*****************************************************************************/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+int PVRSRVGetDriverStatus(void);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVIsBridgeEnabled
+
+ @Description : Returns whether the given bridge group is enabled
+
+ @Return : IMG_TRUE if the given bridge group is enabled
+*****************************************************************************/
+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup)
+{
+ PVR_UNREFERENCED_PARAMETER(hServices);
+
+#if defined(SUPPORT_RGX)
+ if(ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST)
+ {
+ return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_RGX_FIRST)) &
+ gui32RGXBridges) != 0;
+ }
+ else
+#endif /* SUPPORT_RGX */
+ {
+ return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_FIRST)) &
+ gui32PVRBridges) != 0;
+ }
+}
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemBIFTilingHeapGetXStride
+
+ @Description : return the default x-stride configuration for the given
+ BIF tiling heap number
+
+ @Input psDevConfig: Pointer to a device config
+
+ @Input uiHeapNum: BIF tiling heap number, starting from 1
+
+ @Output puiXStride: pointer to x-stride output of the requested heap
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 uiHeapNum,
+ IMG_UINT32 *puiXStride);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemBIFTilingGetConfig
+
+ @Description : return the BIF tiling mode and number of BIF
+ tiling heaps for the given device config
+
+ @Input psDevConfig : Pointer to a device config
+
+ @Output peBifTilingMode: Pointer to a BIF tiling mode enum
+
+ @Output puiNumHeaps : pointer to uint to hold number of heaps
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+ IMG_UINT32 *puiNumHeaps);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*!
+***********************************************************************************
+ @Function : PopulateLMASubArenas
+
+ @Description : Uses the Apphints passed by the client at initialization
+ time to add bases and sizes in the various arenas in the
+ LMA memory
+
+ @Input psDeviceNode : Pointer to the device node struct containing all the
+ arena information
+
+ @Input ui32OSidMin : Single dimensional array containing the minimum values
+ for each OSid area
+
+ @Input ui32OSidMax : Single dimensional array containing the maximum values
+ for each OSid area
+***********************************************************************************/
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS]);
+
+#if defined(EMULATOR)
+ void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+ void SetTrustedDeviceAceEnabled(void);
+#endif
+
+#endif
+
+#endif /* PVRSRV_H */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PowerVR AppHint generic interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRV_APPHINT_H__)
+#define __PVRSRV_APPHINT_H__
+
+#if defined(LINUX)
+
+#include "km_apphint.h"
+#define PVRSRVAppHintDumpState() pvr_apphint_dump_state()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p)
+
+#else
+
+#define PVRSRVAppHintDumpState()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p)
+
+#endif
+
+#endif /* !defined(__PVRSRV_APPHINT_H__) */
+
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PowerVR SrvKM cleanup thread deferred work interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_CLEANUP_H
+#define PVRSRV_CLEANUP_H
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
+/* Typical number of times a caller should want the work to be retried if the
+ * callback function (pfnFree) returns an error.
+ * Callers of PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless they have special requirements.
+ * A value of 6000 corresponds to around 10 minutes.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 6000
+
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+ DLLIST_NODE sNode; /*!< list node to attach to the cleanup thread work list */
+ CLEANUP_THREAD_FN pfnFree; /*!< function to be called */
+ void *pvData; /*!< private data for pfnFree */
+ IMG_UINT32 ui32RetryCount; /*!< number of times the callback should be re-tried when it returns error */
+	IMG_BOOL bDependsOnHW; /*!< work depends on the HW; the callback may need to be retried until outstanding HW operations complete */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+/*!
+******************************************************************************
+ @Function PVRSRVCleanupThreadAddWork
+
+ @Description Add a work item to be called from the cleanup thread
+
+ @Input psData : The function pointer and private data for the callback
+
+ @Return None
+******************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
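+
+/* Illustrative sketch only (not part of the original patch): how a caller
+ * might queue deferred cleanup work. The EXAMPLE_RESOURCE type, its helper
+ * functions and the embedded sCleanupWork member are hypothetical stand-ins
+ * used purely for illustration; any non-OK return from the callback causes
+ * the cleanup thread to retry the item, up to ui32RetryCount times.
+ */
+#if 0 /* example only, never built */
+static PVRSRV_ERROR ExampleCleanupCB(void *pvParam)
+{
+    EXAMPLE_RESOURCE *psRes = pvParam;
+
+    /* Not ready yet: return an error so the cleanup thread retries later */
+    if (!ExampleResourceIsIdle(psRes))
+    {
+        return PVRSRV_ERROR_NOT_READY;
+    }
+
+    ExampleResourceDestroy(psRes);
+    return PVRSRV_OK;
+}
+
+static void ExampleQueueCleanup(EXAMPLE_RESOURCE *psRes)
+{
+    PVRSRV_CLEANUP_THREAD_WORK *psWork = &psRes->sCleanupWork;
+
+    psWork->pfnFree        = ExampleCleanupCB;
+    psWork->pvData         = psRes;
+    psWork->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+    psWork->bDependsOnHW   = IMG_FALSE;
+
+    PVRSRVCleanupThreadAddWork(psWork);
+}
+#endif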
+
+#endif /* PVRSRV_CLEANUP_H */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVRSRV_DEVICE_H__
+#define __PVRSRV_DEVICE_H__
+
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_error.h"
+#include "rgx_fwif_km.h"
+#include "servicesext.h"
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+#include "pvr_dvfs.h"
+#endif
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+
+/*
+ * All the heaps from which regular device memory allocations can be made in
+ * terms of their locality to the respective device.
+ */
+typedef enum
+{
+ PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0,
+ PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL = 2,
+ PVRSRV_DEVICE_PHYS_HEAP_LAST
+} PVRSRV_DEVICE_PHYS_HEAP;
+
+typedef enum
+{
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
+
+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
+{
+ PVRSRV_DEVICE_SNOOP_NONE = 0,
+ PVRSRV_DEVICE_SNOOP_CPU_ONLY,
+ PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
+ PVRSRV_DEVICE_SNOOP_CROSS,
+} PVRSRV_DEVICE_SNOOP_MODE;
+
+typedef IMG_UINT32
+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_PRE_POWER)(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_POST_POWER)(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+typedef void
+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData,
+ IMG_UINT64 ui64MemSize);
+
+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+#define PVRSRV_DEVICE_FW_CODE_REGION (0)
+#define PVRSRV_DEVICE_FW_COREMEM_CODE_REGION (1)
+
+typedef struct _PVRSRV_TD_FW_PARAMS_
+{
+ const void *pvFirmware;
+ IMG_UINT32 ui32FirmwareSize;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+ RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+} PVRSRV_TD_FW_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData,
+ PVRSRV_TD_FW_PARAMS *psTDFWParams);
+
+typedef struct _PVRSRV_TD_POWER_PARAMS_
+{
+ IMG_DEV_PHYADDR sPCAddr; /* META only used param */
+
+ /* MIPS only used fields */
+ IMG_DEV_PHYADDR sGPURegAddr;
+ IMG_DEV_PHYADDR sBootRemapAddr;
+ IMG_DEV_PHYADDR sCodeRemapAddr;
+ IMG_DEV_PHYADDR sDataRemapAddr;
+} PVRSRV_TD_POWER_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData,
+ PVRSRV_TD_POWER_PARAMS *psTDPowerParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData);
+
+typedef struct _PVRSRV_TD_SECBUF_PARAMS_
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ IMG_CPU_PHYADDR *psSecBufAddr;
+ IMG_UINT64 *pui64SecBufHandle;
+} PVRSRV_TD_SECBUF_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_ALLOC)(IMG_HANDLE hSysData,
+ PVRSRV_TD_SECBUF_PARAMS *psTDSecBufParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_FREE)(IMG_HANDLE hSysData,
+ IMG_UINT64 ui64SecBufHandle);
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+ /*! OS device passed to SysDevInit (linux: 'struct device') */
+ void *pvOSDevice;
+
+ /*!
+ *! Service representation of pvOSDevice. Should be set to NULL when the
+ *! config is created in SysDevInit. Set by Services once a device node has
+ *! been created for this config and unset before SysDevDeInit is called.
+ */
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode;
+
+ /*! Name of the device */
+ IMG_CHAR *pszName;
+
+ /*! Version of the device (optional) */
+ IMG_CHAR *pszVersion;
+
+ /*! Register bank address */
+ IMG_CPU_PHYADDR sRegsCpuPBase;
+ /*! Register bank size */
+ IMG_UINT32 ui32RegsSize;
+ /*! Device interrupt number */
+ IMG_UINT32 ui32IRQ;
+
+ PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode;
+
+ /*! Device specific data handle */
+ IMG_HANDLE hDevData;
+
+ /*! System specific data that gets passed into system callback functions. */
+ IMG_HANDLE hSysData;
+
+ IMG_BOOL bHasNonMappableLocalMemory;
+
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 ui32PhysHeapCount;
+
+ /*!
+ *! ID of the Physical memory heap to use.
+ *!
+ *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+ *! flag is not set. Normally this will be the PhysHeapID of an LMA heap
+ *! but the configuration could specify a UMA heap here (if desired).
+ *!
+ *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+ *! flag is set. Normally this will be the PhysHeapID of a UMA heap but
+ *! the configuration could specify an LMA heap here (if desired).
+ *!
+ *! The third entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_FW_LOCAL
+ *! flag is set.
+ *!
+ *! In the event of there being only one Physical Heap, the configuration
+ *! should specify the same heap details in all entries (an illustrative
+ *! sketch follows this structure definition).
+ */
+ IMG_UINT32 aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+ IMG_UINT32 *pui32BIFTilingHeapConfigs;
+ IMG_UINT32 ui32BIFTilingHeapCount;
+
+ /*!
+ *! Callbacks to change system device power state at the beginning and end
+ *! of a power state change (optional).
+ */
+ PFN_SYS_DEV_PRE_POWER pfnPrePowerState;
+ PFN_SYS_DEV_POST_POWER pfnPostPowerState;
+
+ /*! Callback to obtain the clock frequency from the device (optional). */
+ PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet;
+
+ /*!
+ *! Callback to handle memory budgeting. Can be used to reject allocations
+ *! over a certain size (optional).
+ */
+ PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /*!
+ *! Callback to send FW image and FW boot time parameters to the trusted
+ *! device.
+ */
+ PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage;
+
+ /*!
+ *! Callback to send parameters needed in a power transition to the trusted
+ *! device.
+ */
+ PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams;
+
+ /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
+ PFN_TD_RGXSTART pfnTDRGXStart;
+ PFN_TD_RGXSTOP pfnTDRGXStop;
+
+ /*! Callback to request allocation/freeing of secure buffers */
+ PFN_TD_SECUREBUF_ALLOC pfnTDSecureBufAlloc;
+ PFN_TD_SECUREBUF_FREE pfnTDSecureBufFree;
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+ /*! Function that does device feature specific system layer initialisation */
+ PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+ PVRSRV_DVFS sDVFS;
+#endif
+};
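+
+/* Illustrative sketch only (not part of the original patch): a partial,
+ * hypothetical device config as a system layer might fill it in, showing how
+ * the aui32PhysHeapID[] entries select the physical heaps described above.
+ * The name, heap IDs (0 and 1), register size and IRQ are made-up values;
+ * the remaining members and callbacks are left at zero for brevity.
+ */
+#if 0 /* example only, never built */
+static PVRSRV_DEVICE_CONFIG gsExampleDevConfig =
+{
+    .pszName                    = "example_rogue",
+    .ui32RegsSize               = 0x10000,
+    .ui32IRQ                    = 42,
+    .eCacheSnoopingMode         = PVRSRV_DEVICE_SNOOP_NONE,
+    .bHasNonMappableLocalMemory = IMG_FALSE,
+    /* GPU-local allocations come from physical heap 0 (e.g. an LMA heap);
+     * CPU-local and firmware allocations come from physical heap 1 (e.g. UMA). */
+    .aui32PhysHeapID = {
+        [PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0,
+        [PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 1,
+        [PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]  = 1,
+    },
+};
+#endif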
+
+#endif /* __PVRSRV_DEVICE_H__*/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PowerVR device type definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVRSRV_DEVICE_TYPES_H__)
+#define __PVRSRV_DEVICE_TYPES_H__
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES 16 /*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* __PVRSRV_DEVICE_TYPES_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management core
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of device memory management -- This
+ file defines the exposed Services API to core memory management
+ functions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_DEVMEM_H
+#define PVRSRV_DEVMEM_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include <powervr/sync_external.h>
+#include "services_km.h" /* for PVRSRV_DEV_CONNECTION */
+
+
+/*
+ Device memory contexts, heaps and memory descriptors are passed
+ through to underlying memory APIs directly, but are to be regarded
+ as opaque handles externally.
+*/
+typedef struct _PVRSRV_DEVMEMCTX_ *PVRSRV_DEVMEMCTX; /*!< Device-Mem Client-Side Interface: Typedef for Context Ptr */
+typedef DEVMEM_HEAP *PVRSRV_HEAP; /*!< Device-Mem Client-Side Interface: Typedef for Heap Ptr */
+typedef DEVMEM_MEMDESC *PVRSRV_MEMDESC; /*!< Device-Mem Client-Side Interface: Typedef for Memory Descriptor Ptr */
+typedef DEVMEM_EXPORTCOOKIE PVRSRV_DEVMEM_EXPORTCOOKIE; /*!< Device-Mem Client-Side Interface: Typedef for Export Cookie */
+typedef DEVMEM_FLAGS_T PVRSRV_MEMMAP_FLAGS_T; /*!< Device-Mem Client-Side Interface: Typedef for Memory-Mapping Flags Enum */
+typedef IMG_HANDLE PVRSRV_REMOTE_DEVMEMCTX; /*!< Type to use with context export import */
+typedef struct _PVRSRV_EXPORT_DEVMEMCTX_ *PVRSRV_EXPORT_DEVMEMCTX;
+
+/* To use with PVRSRVSubAllocDeviceMem() as the default factor if no
+ * over-allocation is desired. */
+#define PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER
+
+/* N.B. Flags are now defined in pvrsrv_memallocflags.h as they need
+ to be omnipresent. */
+
+/*
+ *
+ * API functions
+ *
+ */
+
+/**************************************************************************/ /*!
+@Function PVRSRVCreateDeviceMemContext
+@Description Creates a device memory context. There is a one-to-one
+ correspondence between this context data structure and the top
+ level MMU page table (known as the Page Catalogue, in the case of a
+ 3-tier MMU). It is intended that a process with its own virtual
+ space on the CPU will also have its own virtual space on the GPU.
+ Thus there is loosely a one-to-one correspondence between process
+ and device memory context, but this is not enforced at this API.
+
+ Every process must create the device memory context before any
+ memory allocations are made, and is responsible for freeing all
+ such allocations before destroying the context
+
+ This is a wrapper function above the "bare-metal" device memory
+ context creation function which would create just a context and no
+ heaps. This function will also create the heaps, according to the
+ heap config that the device specific initialization code has
+ nominated for use by this API.
+
+ The number of heaps thus created is returned to the caller, so
+ that the caller can allocate an array and then call in to fetch
+ details of each heap, or look up a heap with the "Find Heap" API
+ described below.
+
+ In order to derive the details of the MMU configuration for the
+ device, and for retrieving the "bridge handle" for communication
+ internally in services, it is necessary to pass in a
+ PVRSRV_DEV_CONNECTION.
+@Input psDev dev data
+@Output phCtxOut On success, the returned DevMem Context. The
+ caller is responsible for providing storage
+ for this.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVCreateDeviceMemContext(PVRSRV_DEV_CONNECTION *psDevConnection,
+ PVRSRV_DEVMEMCTX *phCtxOut);
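+
+/* Illustrative sketch only (not part of the original patch): creating a
+ * device memory context, looking up a heap and tearing the context down
+ * again. The heap name "General" and the error-handling style are
+ * assumptions made for illustration; real heap names are device specific.
+ */
+#if 0 /* example only, never built */
+static PVRSRV_ERROR ExampleCreateCtx(PVRSRV_DEV_CONNECTION *psConnection)
+{
+    PVRSRV_DEVMEMCTX hCtx;
+    PVRSRV_HEAP hHeap;
+    PVRSRV_ERROR eError;
+
+    eError = PVRSRVCreateDeviceMemContext(psConnection, &hCtx);
+    if (eError != PVRSRV_OK)
+    {
+        return eError;
+    }
+
+    /* Heaps already exist at this point; just look one up by name */
+    eError = PVRSRVFindHeapByName(hCtx, "General", &hHeap);
+    if (eError != PVRSRV_OK)
+    {
+        PVRSRVDestroyDeviceMemContext(hCtx);
+        return eError;
+    }
+
+    /* ... allocate and use memory here (see the sketches further below) ... */
+
+    PVRSRVDestroyDeviceMemContext(hCtx);
+    return PVRSRV_OK;
+}
+#endif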
+
+/**************************************************************************/ /*!
+@Function PVRSRVDestroyDeviceMemContext
+@Description Destroys a device memory context. This cannot fail, or at
+ least it shouldn't, provided the caller has obeyed the protocol,
+ i.e. has freed all of their allocations beforehand.
+@Input hCtx Handle to a DevMem Context
+@Return None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVDestroyDeviceMemContext(PVRSRV_DEVMEMCTX hCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVFindHeapByName
+@Description Returns the heap handle for the named heap, which is assumed to
+ exist in this context.
+
+ N.B. No need for acquire/release semantics here, as when using
+ this wrapper layer, the heaps are automatically instantiated at
+ context creation time and destroyed when the context is
+ destroyed.
+
+ The caller is required to know the heap names already as these
+ will vary from device to device and from purpose to purpose.
+@Input hCtx Handle to a DevMem Context
+@Input pszHeapName Name of the heap to look for
+@Output phHeapOut a handle to the heap, for use in future calls
+ to OpenAllocation / AllocDeviceMemory / Map
+ DeviceClassMemory, etc. (The PVRSRV_HEAP type
+ to be regarded by caller as an opaque, but
+ strongly typed, handle)
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVFindHeapByName(PVRSRV_DEVMEMCTX hCtx,
+ const IMG_CHAR *pszHeapName,
+ PVRSRV_HEAP *phHeapOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemGetHeapBaseDevVAddr
+@Description returns the device virtual address of the base of the heap.
+@Input hHeap Handle to a Heap
+@Output pDevVAddr On success, the device virtual address of the
+ base of the heap.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVDevmemGetHeapBaseDevVAddr(PVRSRV_HEAP hHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
+/**************************************************************************/ /*!
+@Function PVRSRVSubAllocDeviceMem
+@Description Allocate memory from the specified heap, acquiring physical
+ memory from OS as we go and mapping this into
+ the GPU (mandatorily) and CPU (optionally)
+
+ Size must be a positive integer multiple of alignment, or, to
+ put it another way, the uiLog2Align LSBs should all be zero, but
+ at least one other bit should not be.
+
+ Caller to take charge of the PVRSRV_MEMDESC (the memory
+ descriptor) which is to be regarded as an opaque handle.
+
+ If the allocation is supposed to be used with PVRSRVDevmemUnpin()
+ the size must be a page multiple.
+ This is a general rule when suballocations are to
+ be avoided.
+
+@Input uiPreAllocMultiplier Size factor for internal pre-allocation of
+                            memory, to make subsequent calls with the
+                            same flags faster. Regardless of whether a
+                            value is set, the function first tries to
+                            allocate from any pre-allocated memory and,
+                            if that succeeds, does not pre-allocate any
+                            more. The factor can therefore always be set
+                            and the correct thing will be done internally.
+@Input hHeap Handle to the heap from which memory will be
+ allocated
+@Input uiSize Amount of memory to be allocated.
+@Input uiLog2Align LOG2 of the required alignment
+@Input uiMemAllocFlags Allocation Flags
+@Input pszText Text to describe the allocation
+@Output phMemDescOut On success, the resulting memory descriptor
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVSubAllocDeviceMem(IMG_UINT8 uiPreAllocMultiplier,
+ PVRSRV_HEAP hHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *phMemDescOut);
+
+#define PVRSRVAllocDeviceMem(...) \
+ PVRSRVSubAllocDeviceMem(PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE, __VA_ARGS__)
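+
+/* Illustrative sketch only (not part of the original patch): a simple
+ * allocation through the PVRSRVAllocDeviceMem() wrapper above, using a
+ * 4 KiB size and alignment (log2 = 12). The specific flag names combined
+ * here (PVRSRV_MEMALLOCFLAG_GPU_READABLE etc.) are assumed to come from
+ * pvrsrv_memallocflags.h and are used for illustration only.
+ */
+#if 0 /* example only, never built */
+static PVRSRV_ERROR ExampleAlloc(PVRSRV_HEAP hHeap, PVRSRV_MEMDESC *phMemDesc)
+{
+    /* 4 KiB, 4 KiB aligned, readable and writeable by both GPU and CPU */
+    return PVRSRVAllocDeviceMem(hHeap,
+                                4096,
+                                12,
+                                PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+                                "example allocation",
+                                phMemDesc);
+}
+
+/* When finished with the allocation: PVRSRVFreeDeviceMem(*phMemDesc); */
+#endif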
+
+/**************************************************************************/ /*!
+@Function PVRSRVFreeDeviceMem
+@Description Free that allocated by PVRSRVSubAllocDeviceMem (Memory descriptor
+ will be destroyed)
+@Input hMemDesc Handle to the descriptor of the memory to be
+ freed
+@Return None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVFreeDeviceMem(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireCPUMapping
+@Description Causes the allocation referenced by this memory descriptor to be
+ mapped into CPU virtual memory, if it wasn't already, and the
+ CPU virtual address returned in the caller-provided location.
+
+ The caller must call PVRSRVReleaseCPUMapping to advise when they
+ have finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle to the memory descriptor for which a
+ CPU mapping is required
+@Output ppvCpuVirtAddrOut On success, the caller's ptr is set to the
+ new CPU mapping
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVAcquireCPUMapping(PVRSRV_MEMDESC hMemDesc,
+ void **ppvCpuVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseCPUMapping
+@Description Relinquishes the cpu mapping acquired with
+ PVRSRVAcquireCPUMapping()
+@Input hMemDesc Handle of the memory descriptor
+@Return None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVReleaseCPUMapping(PVRSRV_MEMDESC hMemDesc);
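+
+/* Example usage (illustrative sketch): acquire a CPU mapping, write through
+ * it, then release it. hMemDesc is assumed to be a pinned allocation created
+ * with CPU-writeable flags; uiSize is the size used at allocation time.
+ *
+ *   void *pvCpuVirtAddr;
+ *
+ *   if (PVRSRVAcquireCPUMapping(hMemDesc, &pvCpuVirtAddr) == PVRSRV_OK)
+ *   {
+ *       memset(pvCpuVirtAddr, 0, uiSize);
+ *       PVRSRVReleaseCPUMapping(hMemDesc);
+ *   }
+ */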
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVMapToDevice
+@Description Map allocation into the device MMU. This function must only be
+ called once, any further calls will return
+ PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED
+
+ The caller must call PVRSRVReleaseDeviceMapping when they
+ are finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle of the memory descriptor
+@Input hHeap Device heap to map the allocation into
+@Output psDevVirtAddrOut Device virtual address
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVMapToDevice(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_HEAP hHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddrOut);
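+
+/* Example usage (illustrative sketch): map an allocation into the device MMU
+ * once, hand the address to GPU work, then drop the mapping when no GPU work
+ * references it any more. hMemDesc and hHeap are assumed to exist already.
+ *
+ *   IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *   if (PVRSRVMapToDevice(hMemDesc, hHeap, &sDevVAddr) == PVRSRV_OK)
+ *   {
+ *       // ... submit GPU work that uses sDevVAddr ...
+ *       PVRSRVReleaseDeviceMapping(hMemDesc);
+ *   }
+ */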
+
+/**************************************************************************/ /*!
+@Function PVRSRVMapToDeviceAddress
+@Description Same as PVRSRVMapToDevice but caller chooses the address to
+ map into.
+
+ The caller is able to overwrite existing mappings so never use
+ this function on a heap where PVRSRVMapToDevice() has been
+ used before or will be used in the future.
+
+ In general the caller has to know which regions of the heap have
+ been mapped already and should avoid overlapping mappings.
+
+@Input psMemDesc Handle of the memory descriptor
+@Input psHeap Device heap to map the allocation into
+@Input sDevVirtAddr Device virtual address to map to
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireDeviceMapping
+@Description Acquire a reference on the device mapping of the allocation and
+ return the device virtual address in the caller-provided
+ location. If the allocation was not mapped into the device,
+ PVRSRV_ERROR_DEVICEMEM_NO_MAPPING will be returned, as
+ PVRSRVMapToDevice must be called first.
+
+ The caller must call PVRSRVReleaseDeviceMapping when they
+ are finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle to the memory descriptor for which a
+ device mapping is required
+@Output psDevVirtAddrOut On success, the caller's ptr is set to the
+ new device mapping
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVAcquireDeviceMapping(PVRSRV_MEMDESC hMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseDeviceMapping
+@Description Relinquishes the device mapping acquired with
+ PVRSRVAcquireDeviceMapping or PVRSRVMapToDevice
+@Input hMemDesc Handle of the memory descriptor
+@Return None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVReleaseDeviceMapping(PVRSRV_MEMDESC hMemDesc);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDevmemLocalImport
+
+@Description Import a PMR that was created with this connection.
+ The general usage of this function is as follows:
+ 1) Create a devmem allocation on server side.
+ 2) Pass back the PMR of that allocation to client side by
+ creating a handle of type PMR_LOCAL_EXPORT_HANDLE.
+ 3) Pass the PMR_LOCAL_EXPORT_HANDLE to
+ PVRSRVMakeLocalImportHandle() to create a new handle type
+ (DEVMEM_MEM_IMPORT) that can be used with this function.
+
+@Input psDevConnection Services device connection
+
+@Input hExtHandle External memory handle
+
+@Input uiFlags Import flags
+
+@Output phMemDescPtr Created MemDesc
+
+@Output puiSizePtr Size of the created MemDesc
+
+@Input pszAnnotation Annotation string for this allocation/import
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemLocalImport(const PVRSRV_DEV_CONNECTION *psDevConnection,
+ IMG_HANDLE hExtHandle,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ PVRSRV_MEMDESC *phMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation);
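+
+/* Example usage (illustrative sketch): client-side steps 2-3 of the flow
+ * described above. hServerExportHandle, uiFlags and the annotation string
+ * are assumed/placeholder values.
+ *
+ *   IMG_HANDLE hImportHandle;
+ *   PVRSRV_MEMDESC hImportDesc;
+ *   IMG_DEVMEM_SIZE_T uiImportSize;
+ *
+ *   eErr = PVRSRVMakeLocalImportHandle(psDevConnection,
+ *                                      hServerExportHandle,
+ *                                      &hImportHandle);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       eErr = PVRSRVDevmemLocalImport(psDevConnection, hImportHandle,
+ *                                      uiFlags, &hImportDesc,
+ *                                      &uiImportSize, "LocalImportExample");
+ *   }
+ */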
+
+/*************************************************************************/ /*!
+@Function PVRSRVDevmemGetImportUID
+
+@Description Get the UID of the import that backs this MemDesc
+
+@Input hMemDesc MemDesc
+
+@Return UID of import
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemGetImportUID(PVRSRV_MEMDESC hMemDesc,
+ IMG_UINT64 *pui64UID);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAllocExportableDevMem
+@Description Allocate memory without mapping into device memory context. This
+ memory is exported and ready to be mapped into the device memory
+ context of other processes, or to CPU only with
+ PVRSRVMapMemoryToCPUOnly(). The caller agrees to later call
+ PVRSRVFreeUnmappedExportedMemory(). The caller must give the page
+ size of the heap into which this memory may be subsequently
+ mapped, or the largest of such page sizes if it may be mapped
+ into multiple places. This information is communicated via the
+ uiLog2HeapPageSize argument.
+
+ Size must be a positive integer multiple of the page size
+@Input uiLog2Align Log2 of the alignment required
+@Input uiLog2HeapPageSize Log2 of the page size to allocate with.
+ Must be a multiple of the page size of the
+ heap that this is going to be mapped into.
+@Input uiSize the amount of memory to be allocated
+@Input uiFlags Allocation flags
+@Input pszText Text to describe the allocation
+@Output hMemDesc On success, the resulting memory descriptor
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocExportableDevMem(const PVRSRV_DEV_CONNECTION *psDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiLog2HeapPageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *hMemDesc);
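+
+/* Example usage (illustrative sketch): allocate one OS page of exportable
+ * memory, using PVRSRVGetOSLog2PageSize() (declared further down in this
+ * header) for the size, alignment and heap page size. The flag combination
+ * is an assumed typical one.
+ *
+ *   IMG_UINT32 uiLog2Page = PVRSRVGetOSLog2PageSize();
+ *   PVRSRV_MEMDESC hExpMemDesc;
+ *
+ *   eErr = PVRSRVAllocExportableDevMem(psDevConnection,
+ *                                      (IMG_DEVMEM_SIZE_T)1 << uiLog2Page,
+ *                                      uiLog2Page,   // uiLog2Align
+ *                                      uiLog2Page,   // uiLog2HeapPageSize
+ *                                      PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ *                                      "ExportableExample",
+ *                                      &hExpMemDesc);
+ */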
+
+/**************************************************************************/ /*!
+@Function PVRSRVChangeSparseDevMem
+@Description This function alters the underlying memory layout of the given
+ allocation by allocating/removing pages as requested. It also
+ re-writes the GPU and CPU mappings accordingly. The specific
+ actions can be controlled by the corresponding flags.
+
+@Input psMemDesc The memory layout that needs to be modified
+@Input ui32AllocPageCount New page allocation count
+@Input pai32AllocIndices New page allocation indices (page granularity)
+@Input ui32FreePageCount Number of pages that need to be freed
+@Input pai32FreeIndices Indices of the pages that need to be freed
+@Input uiFlags Flags that control the behaviour of the call
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVChangeSparseDevMem(PVRSRV_MEMDESC psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAllocSparseDevMem2
+@Description Allocate sparse memory without mapping into device memory context.
+ Sparse memory is used where you have an allocation that has a
+ logical size (i.e. the amount of VM space it will need when
+ mapping it into a device) that is larger than the amount of
+ physical memory that allocation will use. An example of this
+ is an NPOT texture where the twiddling algorithm requires you
+ to round the width and height to the next POT, so you know
+ there will be pages that are never accessed.
+
+ This memory can be exported and mapped into the device
+ memory context of other processes, or to the CPU.
+
+ Size must be a positive integer multiple of the page size
+@Input psDevMemCtx Memory context to allocate the memory for
+@Input uiSize The logical size of allocation
+@Input uiChunkSize The size of the chunk
+@Input ui32NumPhysChunks The number of physical chunks required
+@Input ui32NumVirtChunks The number of virtual chunks required
+@Input pui32MappingTable index based Mapping table
+@Input uiLog2Align Log2 of the required alignment
+@Input uiLog2HeapPageSize Log2 of the heap we map this into
+@Input uiFlags Allocation flags
+@Input pszText Text to describe the allocation
+@Output hMemDesc On success, the resulting memory descriptor
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocSparseDevMem2(const PVRSRV_DEVMEMCTX psDevMemCtx,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiLog2HeapPageSize,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *hMemDesc);
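+
+/* Example usage (illustrative sketch): a sparse allocation with 8 virtual
+ * chunks of one page each, of which only 2 are physically backed (at virtual
+ * chunk indices 0 and 4, assuming the index-based mapping table lists the
+ * backed virtual chunks). Page size and flags are assumptions.
+ *
+ *   IMG_UINT32 uiLog2Page = PVRSRVGetOSLog2PageSize();
+ *   IMG_UINT32 aui32Mapping[] = { 0, 4 };
+ *   PVRSRV_MEMDESC hSparseDesc;
+ *
+ *   eErr = PVRSRVAllocSparseDevMem2(psDevMemCtx,
+ *                                   (IMG_DEVMEM_SIZE_T)8 << uiLog2Page,
+ *                                   (IMG_DEVMEM_SIZE_T)1 << uiLog2Page,
+ *                                   2,            // ui32NumPhysChunks
+ *                                   8,            // ui32NumVirtChunks
+ *                                   aui32Mapping,
+ *                                   uiLog2Page,   // uiLog2Align
+ *                                   uiLog2Page,   // uiLog2HeapPageSize
+ *                                   uiFlags,
+ *                                   "SparseExample",
+ *                                   &hSparseDesc);
+ */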
+
+/**************************************************************************/ /*!
+@Function PVRSRVAllocSparseDevMem (DEPRECATED and will be removed in future)
+@Description Allocate sparse memory without mapping into device memory context.
+ Sparse memory is used where you have an allocation that has a
+ logical size (i.e. the amount of VM space it will need when
+ mapping it into a device) that is larger than the amount of
+ physical memory that allocation will use. An example of this
+ is an NPOT texture where the twiddling algorithm requires you
+ to round the width and height to the next POT, so you know
+ there will be pages that are never accessed.
+
+ This memory can be exported and mapped into the device
+ memory context of other processes, or to the CPU.
+
+ Size must be a positive integer multiple of the page size
+ This function is deprecated and should not be used in any new
+ code; it will be removed in a subsequent change.
+@Input psDevMemCtx Memory context to allocate the memory for
+@Input uiSize The logical size of allocation
+@Input uiChunkSize The size of the chunk
+@Input ui32NumPhysChunks The number of physical chunks required
+@Input ui32NumVirtChunks The number of virtual chunks required
+@Input pabMappingTable boolean based Mapping table
+@Input uiLog2Align Log2 of the required alignment
+@Input uiLog2HeapPageSize Log2 of the heap we map this into
+@Input uiFlags Allocation flags
+@Input pszText Text to describe the allocation
+@Output hMemDesc On success, the resulting memory descriptor
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocSparseDevMem(const PVRSRV_DEVMEMCTX psDevMemCtx,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_BOOL *pabMappingTable,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetOSLog2PageSize
+@Description Call only AFTER setting up the connection to the kernel module,
+ otherwise it will hit an assert.
+ Gives the log2 of the page size that is utilised by the OS.
+
+@Return Log2 of the OS page size
+*/ /***************************************************************************/
+
+IMG_UINT32 PVRSRVGetOSLog2PageSize(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetHeapLog2PageSize
+@Description Queries the page size of a passed heap.
+
+@Input hHeap Heap that is queried
+@Output puiLog2PageSize Log2 page size will be returned in this
+
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapLog2PageSize(PVRSRV_HEAP hHeap, IMG_UINT32* puiLog2PageSize);
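+
+/* Example usage (illustrative sketch): round an arbitrary byte count up to
+ * whole heap pages before allocating, e.g. for allocations that must later
+ * be unpinned. uiWantedSize is an assumed input.
+ *
+ *   IMG_UINT32 uiLog2HeapPage;
+ *
+ *   if (PVRSRVGetHeapLog2PageSize(hHeap, &uiLog2HeapPage) == PVRSRV_OK)
+ *   {
+ *       IMG_DEVMEM_SIZE_T uiPage = (IMG_DEVMEM_SIZE_T)1 << uiLog2HeapPage;
+ *       IMG_DEVMEM_SIZE_T uiRounded = (uiWantedSize + uiPage - 1) & ~(uiPage - 1);
+ *       // uiRounded is now a whole multiple of the heap page size
+ *   }
+ */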
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetHeapTilingProperties
+@Description Queries the import alignment and tiling stride conversion
+ factor of a passed heap.
+
+@Input hHeap Heap that is queried
+@Output puiLog2ImportAlignment Log2 import alignment will be
+ returned in this
+@Output puiLog2TilingStrideFactor Log2 alignment to tiling stride
+ conversion factor will be returned
+ in this
+
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapTilingProperties(PVRSRV_HEAP hHeap,
+ IMG_UINT32* puiLog2ImportAlignment,
+ IMG_UINT32* puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function PVRSRVMakeLocalImportHandle
+@Description This is a "special case" function for making a local import
+ handle. The server handle is a handle to a PMR of bridge type
+ PMR_LOCAL_EXPORT_HANDLE. The returned local import handle will
+ be of the bridge type DEVMEM_MEM_IMPORT that can be used with
+ PVRSRVDevmemLocalImport().
+@Input psConnection Services connection
+@Input hServerHandle Server export handle
+@Output hLocalImportHandle Returned client import handle
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVMakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+ IMG_HANDLE hServerHandle,
+ IMG_HANDLE *hLocalImportHandle);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnmakeLocalImportHandle
+@Description Destroy the hLocalImportHandle created with
+ PVRSRVMakeLocalImportHandle().
+@Input psConnection Services connection
+@Output hLocalImportHandle Local import handle
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnmakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+ IMG_HANDLE hLocalImportHandle);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/**************************************************************************/ /*!
+@Function PVRSRVExportDevMem
+@Description Given a memory allocation allocated with Devmem_Allocate(),
+ create a "cookie" that can be passed intact by the caller's own
+ choice of secure IPC to another process and used as the argument
+ to "map" to map this memory into a heap in the target process.
+ N.B. This can also be used to map into multiple heaps in one
+ process, though that's not the intention.
+
+ Note, the caller must later call PVRSRVUnexportDevMem before
+ freeing the memory.
+@Input hMemDesc handle to the descriptor of the memory to be
+ exported
+@Output phExportCookie On success, a handle to the exported cookie
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVExportDevMem(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnexportDevMem
+@Description Undo the export caused by PVRSRVExportDevMem - note - it doesn't
+ actually tear down any mapping made by processes that received
+ the export cookie. It will simply make the cookie null and void
+ and prevent further mappings.
+@Input hMemDesc handle to the descriptor of the memory which
+ will no longer be exported
+@Output phExportCookie On success, the export cookie provided will be
+ set to null
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVUnexportDevMem(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function PVRSRVImportDevMem
+@Description Import memory that was previously exported with
+ PVRSRVExportDevMem() into the current process.
+
+ Note: This call only makes the memory accessible to this
+ process, it doesn't map it into the device or CPU.
+
+@Input psConnection Connection to services
+@Input phExportCookie Ptr to the handle of the export-cookie
+ identifying the memory to be imported
+@Output phMemDescOut On Success, a handle to a new memory descriptor
+ representing the memory as mapped into the
+ local process address space.
+@Input uiFlags Device memory mapping flags
+@Input pszText Text to describe the import
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVImportDevMem(const PVRSRV_DEV_CONNECTION *psConnection,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ PVRSRV_MEMDESC *phMemDescOut);
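+
+/* Example usage (illustrative sketch): the exporting and importing sides of
+ * the insecure export path. The IPC transport used to pass the cookie
+ * between the two processes is out of scope here.
+ *
+ *   // Exporting process:
+ *   PVRSRV_DEVMEM_EXPORTCOOKIE sCookie;
+ *   eErr = PVRSRVExportDevMem(hMemDesc, &sCookie);
+ *   // ... pass the cookie to the other process ...
+ *
+ *   // Importing process:
+ *   PVRSRV_MEMDESC hImported;
+ *   eErr = PVRSRVImportDevMem(psConnection, &sCookie, uiFlags, &hImported);
+ *   // hImported can then be mapped with PVRSRVMapToDevice() and/or
+ *   // PVRSRVAcquireCPUMapping() as needed.
+ */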
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/**************************************************************************/ /*!
+@Function PVRSRVIsDeviceMemAddrValid
+@Description Checks if given device virtual memory address is valid
+ from the GPU's point of view.
+
+ This method is intended to be called by a process that imported
+ another process' memory context, hence the expected
+ PVRSRV_REMOTE_DEVMEMCTX parameter.
+
+ See PVRSRVAcquireRemoteDevMemContext for details about
+ importing memory contexts.
+
+@Input hContext handle to memory context
+@Input sDevVAddr device 40bit virtual memory address
+@Return PVRSRV_OK if address is valid or
+ PVRSRV_ERROR_INVALID_GPU_ADDR when address is invalid
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVIsDeviceMemAddrValid(PVRSRV_REMOTE_DEVMEMCTX hContext,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemPin
+@Description This is the counterpart to PVRSRVDevmemUnpin. It is meant to be
+ called after unpinning an allocation.
+
+ It will make an unpinned allocation available again and
+ unregister it from the OS shrinker. If the shrinker was invoked
+ by the OS while the allocation was unpinned, new physical pages
+ will be allocated.
+
+ If any GPU mapping existed before, the same virtual address
+ range will be valid again.
+
+@Input hMemDesc The MemDesc that is going to be pinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the pre-unpin content
+ is still present and can be reused.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY if the memory has
+ been pinned successfully but the pre-unpin
+ content was lost.
+
+ PVRSRV_ERROR_INVALID_PARAMS if the MemDesc is
+ invalid e.g. NULL.
+
+ PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES if the
+ memory of the allocation is lost and we failed
+ to allocate new pages.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemPin(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemUnpin
+@Description Unpins an allocation. Unpinning means that the memory must no
+ longer be accessed by either the CPU or the GPU.
+ The physical memory pages will be registered with a shrinker
+ and the OS is able to reclaim them in OOM situations when the
+ shrinker is invoked.
+
+ The counterpart to this is PVRSRVDevmemPin() which
+ checks if the physical pages were reclaimed by the OS and then
+ either allocates new physical pages or just unregisters the
+ allocation from the shrinker. The device virtual address range
+ (if any existed) will be kept.
+
+ The GPU mapping will be kept but is going to be invalidated.
+ It is allowed to free an unpinned allocation or remove the GPU
+ mapping.
+
+ RESTRICTIONS:
+ - Unpinning should only be done if the caller is sure that
+ the GPU finished all pending/running operations on the allocation.
+
+ - The caller must ensure that no other process than the calling
+ one itself has imported or mapped the allocation, otherwise the
+ unpinning will fail.
+
+ - All CPU mappings have to be removed beforehand by the caller.
+
+ - Any attempts to map the allocation while it is unpinned are
+ forbidden.
+
+ - When using PVRSRVAllocDeviceMem() the caller must allocate
+ whole pages from the chosen heap to avoid suballocations.
+
+@Input hMemDesc The MemDesc that is going to be unpinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success.
+
+ PVRSRV_ERROR_INVALID_PARAMS if the passed
+ allocation is not a multiple of the heap page
+ size but was allocated with
+ PVRSRVAllocDeviceMem(), or if it is NULL.
+
+ PVRSRV_ERROR_PMR_STILL_REFERENCED if the passed
+ allocation is still referenced i.e. is still
+ exported or mapped somewhere else.
+
+ PVRSRV_ERROR_STILL_MAPPED will be returned if the
+ calling process still has CPU mappings set up
+ or the GPU mapping was acquired more than once.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemUnpin(PVRSRV_MEMDESC hMemDesc);
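+
+/* Example usage (illustrative sketch): release the physical backing of an
+ * idle, page-multiple allocation and restore it later, re-initialising the
+ * contents if the OS reclaimed the pages in the meantime.
+ *
+ *   if (PVRSRVDevmemUnpin(hMemDesc) == PVRSRV_OK)
+ *   {
+ *       // ... the allocation must not be touched by CPU or GPU here ...
+ *
+ *       eErr = PVRSRVDevmemPin(hMemDesc);
+ *       if (eErr == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *       {
+ *           // backing pages were reclaimed: previous contents are lost,
+ *           // re-initialise before use
+ *       }
+ *   }
+ */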
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemGetSize
+@Description Returns the allocated size for this device-memory.
+
+@Input hMemDesc handle to memory allocation
+@Output puiSize return value for size
+@Return PVRSRV_OK on success or
+ PVRSRV_ERROR_INVALID_PARAMS
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemGetSize(PVRSRV_MEMDESC hMemDesc, IMG_DEVMEM_SIZE_T* puiSize);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVExportDevMemContext
+@Description Makes the given memory context available to other processes that
+ can get a handle to it via PVRSRVAcquireRemoteDevmemContext.
+ This handle can be used for e.g. the breakpoint functions.
+
+ The context will only be available to other processes that can
+ pass in a memory descriptor that is shared between this and the
+ importing process. We use the memory descriptor to identify the
+ correct context and verify that the caller is allowed to request
+ the context.
+
+ The whole mechanism is intended to be used with the debugger,
+ which can for example load USC breakpoint handlers into the
+ shared allocation and then use the acquired remote context
+ (exported here) to set/clear breakpoints in USC code.
+
+@Input hLocalDevmemCtx Context to export
+@Input hSharedAllocation A memory descriptor that points to a shared allocation
+ between the two processes. Must be in the given context.
+@Output phExportCtx A handle to the exported context that is needed for
+ the destruction with PVRSRVUnexportDevMemContext().
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVExportDevMemContext(PVRSRV_DEVMEMCTX hLocalDevmemCtx,
+ PVRSRV_MEMDESC hSharedAllocation,
+ PVRSRV_EXPORT_DEVMEMCTX *phExportCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnexportDevMemContext
+@Description Removes the context from the list of sharable contexts that
+ can be imported via PVRSRVAcquireRemoteDevMemContext.
+
+@Input hExportCtx An export context retrieved from
+ PVRSRVExportDevMemContext.
+*/ /***************************************************************************/
+extern void
+PVRSRVUnexportDevMemContext(PVRSRV_EXPORT_DEVMEMCTX hExportCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireRemoteDevMemContext
+@Description Retrieves an exported context that has been made available with
+ PVRSRVExportDevmemContext in the remote process.
+
+ hSharedMemDesc must be a memory descriptor pointing to the same
+ physical resource as the one passed to PVRSRVExportDevmemContext
+ in the remote process.
+ The memory descriptor has to be retrieved from the remote process
+ via a secure buffer export/import mechanism like DMABuf.
+
+@Input hDevmemCtx Memory context of the calling process.
+@Input hSharedAllocation The memory descriptor used to export the context
+@Output phRemoteCtx Handle to the remote context.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVAcquireRemoteDevMemContext(PVRSRV_DEVMEMCTX hDevmemCtx,
+ PVRSRV_MEMDESC hSharedAllocation,
+ PVRSRV_REMOTE_DEVMEMCTX *phRemoteCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseRemoteDevMemContext
+@Description Releases the remote context and destroys it if this is the last
+ reference.
+
+@Input hRemoteCtx Handle to the remote context that will be removed.
+*/ /***************************************************************************/
+extern void
+PVRSRVReleaseRemoteDevMemContext(PVRSRV_REMOTE_DEVMEMCTX hRemoteCtx);
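+
+/* Example usage (illustrative sketch): debugger-style flow across two
+ * processes that already share an allocation (hSharedAllocation) via a
+ * secure buffer export/import mechanism such as DMABuf.
+ *
+ *   // Process A (owner of the context):
+ *   PVRSRV_EXPORT_DEVMEMCTX hExportCtx;
+ *   eErr = PVRSRVExportDevMemContext(hLocalDevmemCtx, hSharedAllocation,
+ *                                    &hExportCtx);
+ *
+ *   // Process B (debugger), with its own MemDesc for the same allocation:
+ *   PVRSRV_REMOTE_DEVMEMCTX hRemoteCtx;
+ *   eErr = PVRSRVAcquireRemoteDevMemContext(hDevmemCtx, hSharedAllocation,
+ *                                           &hRemoteCtx);
+ *   // ... e.g. PVRSRVIsDeviceMemAddrValid(hRemoteCtx, sDevVAddr) ...
+ *   PVRSRVReleaseRemoteDevMemContext(hRemoteCtx);
+ */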
+
+/*************************************************************************/ /*!
+@Function PVRSRVRegisterDevmemPageFaultNotify
+@Description Registers to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVRegisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+/*************************************************************************/ /*!
+@Function PVRSRVUnregisterDevmemPageFaultNotify
+@Description Unregisters from being notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be unregistered from.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVUnregisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+#if defined __cplusplus
+}
+#endif
+#endif /* PVRSRV_DEVMEM_H */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File pvrsrv_error.h
+@Title services error enumerant
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines error codes used by any/all services modules
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PVRSRV_ERROR_H__)
+#define __PVRSRV_ERROR_H__
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR
+{
+ PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
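+
+/* The error list is kept in pvrsrv_errors.h as a PVRE() X-macro so it can be
+ * expanded in more than one way. Illustrative sketch of a hypothetical
+ * error-to-string helper built by re-including the list with a different
+ * PVRE() definition:
+ *
+ *   static const char *ExampleErrorToString(PVRSRV_ERROR eError)
+ *   {
+ *       switch (eError)
+ *       {
+ *           case PVRSRV_OK: return "PVRSRV_OK";
+ *   #define PVRE(x) case x: return #x;
+ *   #include "pvrsrv_errors.h"
+ *   #undef PVRE
+ *           default: return "unknown PVRSRV_ERROR";
+ *       }
+ *   }
+ */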
+
+#endif /* !defined (__PVRSRV_ERROR_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File pvrsrv_errors.h
+@Title services error codes
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines error codes used by any/all services modules
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY)
+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_PMR_EMPTY)
+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_CACHEOP_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL)
+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL)
+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT)
+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED)
+PVRE(PVRSRV_ERROR_INTERNAL_ERROR)
+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT)
+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM)
+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE)
+PVRE(PVRSRV_ERROR_BRIDGE_EPERM)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY)
+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_PROBE_DEFER)
+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_CLOSE_FAILED)
+PVRE(PVRSRV_ERROR_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT)
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_SIGNAL_FAILED)
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file defines flags used on memory allocations and mappings
+ These flags are relevant throughout the memory management
+ software stack and are specified by users of services and
+ understood by all levels of the memory management in both
+ client and server.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_memallocflags.h"
+#endif
+typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
+
+/*
+ * --- MAPPING FLAGS ---
+ * | 0-3 | 4-7 | 8-10 | 11-13 | 14 |
+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
+ *
+ * --- MISC FLAGS ---
+ * | 15 | 16 | 17 | 18 | 19 | 20 |
+ * | Defer | CPU-Local | FW-Local | SVM | Sparse-Dummy-Page | CPU-Cache-Clean |
+ *
+ * --- DEV CONTROL FLAGS ---
+ * | 24-27 |
+ * | Device-Flags |
+ *
+ * --- MEMSET FLAGS ---
+ * | 29 | 30 | 31 |
+ * | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc |
+ *
+ */
+
+/*!
+ * **********************************************************
+ * * *
+ * * MAPPING FLAGS *
+ * * *
+ * **********************************************************
+ */
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag. It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ * that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ * that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ * that the PMR will be created with permission to be mapped
+ * with a GPU readable mapping, _and_ that this PMR will be
+ * mapped with a GPU readable mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1U<<0)
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag. It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B. This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1U<<1) /*!< mapped as writable to the GPU */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1U<<2) /*!< can be mapped as GPU readable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) /*!< can be mapped as GPU writable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1U<<4) /*!< mapped as readable to the CPU */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1U<<5) /*!< mapped as writable to the CPU */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1U<<6) /*!< can be mapped as CPU readable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) /*!< can be mapped as CPU writable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0)
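+
+/* Example usage (illustrative sketch): an assumed typical flag combination
+ * for an allocation written by the CPU and read by the GPU. Cache-mode flags
+ * from the sections below would normally be OR-ed in as well.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE;
+ *
+ *   if (PVRSRV_CHECK_GPU_READABLE(uiFlags))
+ *   {
+ *       // GPU read access was requested
+ *   }
+ */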
+
+
+/*
+ * **********************************************************
+ * * *
+ * * CACHE CONTROL FLAGS *
+ * * *
+ * **********************************************************
+ */
+
+/*
+ GPU domain
+ ==========
+
+ The following defines are used to control the GPU cache bit field.
+ The defines are mutually exclusive.
+
+ A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU cache
+ bit field from the flags. This should be used whenever the GPU cache mode
+ needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_UNCACHED
+
+ GPU domain. Request uncached memory. This means that any writes to memory
+ allocated with this flag are written straight to memory and thus are coherent
+ for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (0U<<8)
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE
+
+ GPU domain. Use write combiner (if supported) to combine sequential writes
+ together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE (1U<<8)
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT
+
+ GPU domain. This flag affects the GPU MMU protection flags.
+ The allocation will be cached.
+ Services will try to set the coherent bit in the GPU MMU tables so the
+ GPU cache is snooping the CPU cache. If coherency is not supported the
+ caller is responsible to ensure the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
+
+ GPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+ This means that if the allocation needs to transition from one device
+ to another services has to be informed so it can flush/invalidate the
+ appropriate caches.
+
+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+ expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHED
+
+ GPU domain. This flag is for internal use only and is used to indicate
+ that the underlying allocation should be cached on the GPU
+ after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7U<<8)
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK
+
+ GPU domain. GPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7U<<8)
+#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+
+/*
+ CPU domain
+ ==========
+
+ The following defines are used to control the CPU cache bit field.
+ The defines are mutually exclusive.
+
+ A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU cache
+ bit field from the flags. This should be used whenever the CPU cache mode
+ needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+
+ CPU domain. Request uncached memory. This means that any writes to memory
+ allocated with this flag are written straight to memory and thus are coherent
+ for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (0U<<11)
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+
+ CPU domain. Use write combiner (if supported) to combine sequential writes
+ together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE (1U<<11)
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT
+
+ CPU domain. This flag affects the CPU MMU protection flags.
+ The allocation will be cached.
+ Services will try to set the coherent bit in the CPU MMU tables so the
+ CPU cache is snooping the GPU cache. If coherency is not supported the
+ caller is responsible for ensuring the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT
+
+ CPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+ This means that if the allocation needs to transition from one device
+ to another, Services has to be informed so it can flush/invalidate the
+ appropriate caches.
+
+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+ expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHED
+
+ CPU domain. This flag is for internal use only and is used to indicate
+ that the underlying allocation should be cached on the CPU
+ after all the snooping and coherency checks have been done.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7U<<11)
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+
+/*!
+ CPU domain. CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7U<<11)
+#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+
+/*! PVRSRV_MEMALLOCFLAG_UNCACHED
+ * Memory will be uncached on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_WRITE_COMBINE
+ * Memory will be write-combined on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE (PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_COHERENT
+ * Memory will be cached on CPU and GPU
+ * Services will try to set the correct flags in the MMU tables.
+ * If coherency is not supported, the caller has to ensure the caches are up to date
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT
+ * Memory will be cache-incoherent on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT)
+
+/*!
+ Cache mode mask
+*/
+#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags))
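+
+/* Usage sketch (illustrative only, not part of this interface): composing a
+ * GPU/CPU cache mode and testing it with the helper macros above. The
+ * variable name uiExampleFlags is hypothetical.
+ *
+ *   IMG_UINT32 uiExampleFlags = PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ *                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ *
+ *   if (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiExampleFlags))
+ *   {
+ *       // GPU mapping is cached but not snooped; flush before handover
+ *   }
+ *   if (PVRSRV_CHECK_CPU_UNCACHED(uiExampleFlags))
+ *   {
+ *       // CPU writes go straight to memory
+ *   }
+ */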
+
+
+/*!
+ CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+ MMU Flags mask -- intended for use internal to services only - used
+ for partitioning the flags bits and determining which flags to pass
+ down to mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+
+ Indicates that the PMR created due to this allocation will support
+ in-kernel CPU mappings. Only privileged processes may use this
+ flag as it may cause wastage of precious kernel virtual memory on
+ some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1U<<14)
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0)
+
+
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * ALLOC MEMORY FLAGS *
+ * * *
+ * **********************************************************
+ *
+ * (Bits 15)
+ *
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1U<<15)
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0)
+
+/*!
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+
+ Indicates that the allocation will primarily be accessed by
+ the CPU, so a UMA allocation (if available) is preferable.
+ If not set, the allocation will primarily be accessed by
+ the GPU, so LMA allocation (if available) is preferable.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL (1U<<16)
+#define PVRSRV_CHECK_CPU_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0)
+
+
+/*!
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL
+
+ Indicates that the allocation will primarily be accessed by
+ the FW.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_LOCAL (1U<<17)
+#define PVRSRV_CHECK_FW_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_LOCAL) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SVM
+
+ Indicates that the allocation will be accessed by the
+ CPU and GPU using the same virtual address, i.e. for
+ all SVM allocs, IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
+ */
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1U<<18)
+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING
+
+ Indicates that the memory being allocated is sparse and that the
+ sparse regions should not be backed by a dummy page */
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1U << 19)
+#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN
+
+ Services is going to clean the cache for the allocated memory.
+ For performance reasons, avoid using this flag if the allocation is written
+ to by the CPU anyway before the next GPU kick.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1U<<20)
+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0)
+
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * MEMORY ZEROING AND POISONING FLAGS *
+ * * *
+ * **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following use cases are required:
+ *
+ * don't poison or zero on alloc or free
+ * (normal operation, also most efficient)
+ * poison on alloc
+ * (for helping to highlight bugs)
+ * poison on alloc and free
+ * (for helping to highlight bugs)
+ * zero on alloc
+ * (avoid highlighting security issues in other uses of memory)
+ * zero on alloc and poison on free
+ * (avoid highlighting security issues in other uses of memory,
+ * while helping to highlight a subset of bugs e.g. memory
+ * freed prematurely)
+ *
+ * Since there are more than 4, we can't encode this in just two bits,
+ * so we might as well have a separate flag for each of the three
+ * actions.
+ */
+
+/*! PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+
+ Ensures that the memory allocated is initialised with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1U<<31)
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0)
+#define PVRSRV_GET_ZERO_ON_ALLOC_FLAG(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+
+ Scribbles over the allocated memory with a poison value
+
+ Not compatible with ZERO_ON_ALLOC
+
+ Poisoning is very deliberately _not_ reflected in PDump as we want
+ a simulation to cry loudly if the initialised data propagates to a
+ result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1U<<30)
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+
+ Causes memory to be trashed when freed, as a lazy man's security
+ measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0)
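+
+/* Illustrative sketch only (not part of this interface): selecting the
+ * "zero on alloc, poison on free" use case described above. ZERO_ON_ALLOC
+ * and POISON_ON_ALLOC are mutually exclusive, so a debug build might pick
+ * its flags as follows (uiDebugFlags is a hypothetical name):
+ *
+ *   IMG_UINT32 uiDebugFlags = PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ *                             PVRSRV_MEMALLOCFLAG_POISON_ON_FREE;
+ *
+ *   // never set both ZERO_ON_ALLOC and POISON_ON_ALLOC
+ *   PVR_ASSERT(!(PVRSRV_CHECK_ZERO_ON_ALLOC(uiDebugFlags) &&
+ *                PVRSRV_CHECK_POISON_ON_ALLOC(uiDebugFlags)));
+ */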
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * Device specific MMU flags *
+ * * *
+ * **********************************************************
+ *
+ * (Bits 24 to 27)
+ *
+ * Some Services-controlled devices have device-specific control
+ * bits in their page table entries. These flags need to be passed
+ * down through the memory management layers so the user can
+ * control those bits.
+ */
+
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 24
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK 0x0f000000UL
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \
+ (((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
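+
+/* Illustrative sketch only: a device-specific PTE control value, here the
+ * hypothetical value 0x5, is shifted into bits 24..27 by the macro above and
+ * can be recovered again with the mask and offset:
+ *
+ *   IMG_UINT32 uiDevFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(0x5);
+ *   // uiDevFlags == 0x05000000
+ *   IMG_UINT32 uiValue = (uiDevFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+ *                        >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET;
+ *   // uiValue == 0x5
+ */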
+
+
+/*!
+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
+ * because they are not related to CPU mappings.
+ */
+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
+
+
+
+/*!
+ PMR flags mask -- for internal services use only. This is the set
+ of flags that will be passed down and stored with the PMR. It also
+ includes the MMU flags which the PMR has to pass down to mmu_common.c
+ at PMRMap time.
+*/
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL | \
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+
+/*!
+ RA differentiation mask
+
+ for use internal to services
+
+ This is the set of flag bits that determines whether a pair of
+ allocations are permitted to live in the same page. Allocations
+ whose flags differ in any of these bits would be allocated from
+ separate RA imports and therefore would never coexist in the same
+ page.
+ Zeroing and poisoning of memory are special cases: the caller is
+ responsible for setting each sub-allocation to the value it requires,
+ so differentiating between zeroed and poisoned RA imports does not
+ make sense because the memory might be reused.
+
+*/
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+ & \
+ ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+
+/*!
+ Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+ Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+
+/*!
+ Flags that affect _physical allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+
+/*!
+ Flags that affect _virtual allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED)
+
+#endif /* #ifndef PVRSRV_MEMALLOCFLAGS_H */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PVR synchronization interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Types for server side code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_SYNC_KM_H
+#define PVRSRV_SYNC_KM_H
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* Implementation independent types for passing fence/timeline to KM */
+typedef int32_t PVRSRV_TIMELINE_KM;
+typedef int32_t PVRSRV_FENCE_KM;
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* PVRSRV_SYNC_KM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services Transport Layer common types and definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common types and definitions included into
+ both user mode and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_TLCOMMON_H__
+#define __PVR_TLCOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U
+
+/*! Packet lengths are always rounded up to a multiple of 8 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT 8U
+#define PVRSRVTL_ALIGN(x) ((x+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1))
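+
+/* Worked example (illustrative only): PVRSRVTL_ALIGN(1) == 8,
+ * PVRSRVTL_ALIGN(8) == 8 and PVRSRVTL_ALIGN(13) == 16, i.e. lengths are
+ * rounded up to the next multiple of PVRSRVTL_PACKET_ALIGNMENT.
+ */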
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are 3 types of packet: normal (has data), data lost and padding,
+ * see packet flags. Header kept small to reduce data overhead.
+ *
+ * If the ORDER of the structure members is changed, please UPDATE the
+ * type/size mask and offset macros below accordingly.
+ */
+typedef struct _PVRSRVTL_PACKETHDR_
+{
+ IMG_UINT32 uiTypeSize; /*!< Type & number of bytes following header */
+ IMG_UINT32 uiReserved; /*!< Reserved; packets and data must be 8-byte aligned */
+
+ /* First bytes of TL packet data follow header ... */
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* Structure must always be a size multiple of 8 as stream buffer
+ * still an array of IMG_UINT32s.
+ */
+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
+
+/*! Packet header mask used to extract the size from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU
+#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
+
+
+/*! Packet header mask used to extract the type from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U
+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U
+
+/*! Packet type enumeration.
+ */
+typedef enum _PVRSRVTL_PACKETTYPE_
+{
+ /*! Undefined packet */
+ PVRSRVTL_PACKETTYPE_UNDEF = 0,
+
+ /*! Normal packet type. Indicates data follows the header.
+ */
+ PVRSRVTL_PACKETTYPE_DATA = 1,
+
+ /*! This packet type indicates that, at this point in the stream, one or
+ * more packets could not be accepted due to space constraints and that
+ * recent data may have been lost, depending on how the producer handled
+ * the error. Such packets carry no data; the data length is 0.
+ */
+ PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2,
+
+ /*! Packets with this type set are padding packets that contain undefined
+ * data and must be ignored/skipped by the client. They are used when the
+ * circular stream buffer wraps around and there is not enough space for
+ * the data at the end of the buffer. Such packets have a length of 0 or
+ * more.
+ */
+ PVRSRVTL_PACKETTYPE_PADDING = 3,
+
+ /*! This packet type conveys to the stream consumer that the stream producer
+ * has reached the end of data for that data sequence. The TLDaemon
+ * has several options for processing these packets that can be selected
+ * on a per stream basis.
+ */
+ PVRSRVTL_PACKETTYPE_MARKER_EOS = 4,
+
+ PVRSRVTL_PACKETTYPE_LAST = PVRSRVTL_PACKETTYPE_MARKER_EOS
+} PVRSRVTL_PACKETTYPE;
+
+/* The SET_PACKET_* macros rely on the layout of the uiTypeSize member of
+ * PVRSRVTL_PACKETHDR: the packet type occupies the top bits (see
+ * PVRSRVTL_PACKETHDR_TYPE_OFFSET) and the data length the bottom bits.
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_HDR(len,type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Returns the number of bytes of data in the packet. p may be any address type
+ * */
+#define GET_PACKET_DATA_LEN(p) \
+ ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR)(p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns an IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p) \
+ ((IMG_PBYTE) ( ((size_t)p) + sizeof(PVRSRVTL_PACKETHDR)) )
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
+ * It is up to the caller to determine whether the new address is within the
+ * packet buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+ ((PVRSRVTL_PPACKETHDR) ( ((IMG_UINT8 *)p) + sizeof(PVRSRVTL_PACKETHDR) + \
+ (((((PVRSRVTL_PPACKETHDR)p)->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) + \
+ (PVRSRVTL_PACKET_ALIGNMENT-1)) & (~(PVRSRVTL_PACKET_ALIGNMENT-1)) ) ))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type
+ */
+#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR)(p))
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
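+
+/* Illustrative sketch only (not part of this interface): writing a data
+ * packet header and stepping over it with the macros above. pui8Buf and
+ * uiDataLen are hypothetical; a real producer writes into a stream buffer
+ * managed by the Transport Layer.
+ *
+ *   PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(pui8Buf);
+ *   psHdr->uiTypeSize = PVRSRVTL_SET_PACKET_DATA(uiDataLen);
+ *
+ *   // consumer side
+ *   if (GET_PACKET_TYPE(psHdr) == PVRSRVTL_PACKETTYPE_DATA)
+ *   {
+ *       IMG_PBYTE pbData = GET_PACKET_DATA_PTR(psHdr);
+ *       PVRSRVTL_PPACKETHDR psNext = GET_NEXT_PACKET_ADDR(psHdr);
+ *   }
+ */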
+
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
+ * 0x02 - When the stream does not exist wait for a bit (2s) in
+ * PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ * does not exist.
+ * 0x04 - Open stream for write-only operations.
+ * If this flag is not used the stream is opened as read-only. The flag
+ * is required if one wants to call the reserve/commit/write functions
+ * on the stream descriptor. Reads from a stream descriptor opened
+ * with this flag will fail.
+ */
+#define PVRSRV_STREAM_FLAG_NONE (0U)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2)
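+
+/* Illustrative sketch only: a reader that must not block when no data is
+ * available, but is willing to wait for the stream to appear, could combine
+ * the flags as follows before calling PVRSRVTLOpenStream() (ui32Mode is a
+ * hypothetical name):
+ *
+ *   IMG_UINT32 ui32Mode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+ *                         PVRSRV_STREAM_FLAG_OPEN_WAIT;
+ */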
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_TLCOMMON_H__ */
+/******************************************************************************
+ End of file (pvrsrv_tlcommon.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services Transport Layer stream names
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common types and definitions included into
+ both user mode and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRSRV_TLSTREAMS_H_
+#define _PVRSRV_TLSTREAMS_H_
+
+
+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf"
+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host"
+
+/* Host HWPerf client stream names are of the form 'hwperf_client_<pid>' */
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_"
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u"
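+
+/* Illustrative sketch only: the client stream for process 1234 is named
+ * "hwperf_client_1234". A caller might build the name from the format
+ * specifier above, e.g. (acName and ui32Pid are hypothetical, and the
+ * buffer size macro comes from pvrsrv_tlcommon.h):
+ *
+ *   IMG_CHAR acName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ *   snprintf(acName, sizeof(acName),
+ *            PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, ui32Pid);
+ */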
+
+
+
+#endif /* _PVRSRV_TLSTREAMS_H_ */
+
+/******************************************************************************
+ End of file (pvrsrv_tlstreams.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource
+ allocator was originally intended to manage address spaces. In
+ practice the resource allocator is generic and can manage arbitrary
+ sets of integers.
+
+ Resources are allocated from arenas. Arenas can be created with an
+ initial span of resources. Further resource spans can be added to
+ arenas. A callback mechanism allows an arena to request further
+ resource spans on demand.
+
+ Each arena maintains an ordered list of resource segments, each
+ described by a boundary tag. Each boundary tag describes a segment
+ of resources which is either 'free' (available for allocation) or
+ 'busy' (currently allocated). Adjacent 'free' segments are always
+ coalesced to avoid fragmentation.
+
+ For allocation, all 'free' segments are kept on lists of 'free'
+ segments in a table indexed by pvr_log2(segment size), i.e. each table
+ index n holds 'free' segments in the size range 2^n -> 2^(n+1) - 1.
+
+ Allocation policy is based on an *almost* good fit strategy.
+
+ Allocated segments are inserted into a self-scaling hash table which
+ maps the base resource of the span to the relevant boundary
+ tag. This allows the code to get back to the boundary tag without
+ exporting explicit boundary tag references through the API.
+
+ Each arena has an associated quantum size; all allocations from the
+ arena are made in multiples of the basic quantum.
+
+ On resource exhaustion in an arena, a callback, if provided, will be
+ used to request further resources. Resource spans allocated by the
+ callback mechanism will be returned when freed (through one of the
+ two callbacks).
+*/ /**************************************************************************/
+
+/* Issues:
+ * - flags: flags are passed into the resource allocator but are not currently used.
+ * - determination of import size is currently braindead.
+ * - debug code should be moved out to its own module and #ifdef'd
+ */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+#include "pvr_intrinsics.h"
+
+/* The initial, and minimum size of the live address -> boundary tag
+ structure hash table. The value 64 is a fairly arbitrary
+ choice. The hash table resizes on demand so the value chosen is
+ not critical. */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+ /* make sure Klocwork analyses all the code (including the debug code) */
+ #if !defined(RA_VALIDATE)
+ #define RA_VALIDATE
+ #endif
+#endif
+
+#if (!defined(PVRSRV_NEED_PVR_ASSERT)) || (!defined(RA_VALIDATE))
+ /* Disable the asserts unless explicitly told otherwise. They slow the
+ driver down too much for general use. */
+
+ #undef PVR_ASSERT
+ /* use a macro that really does nothing when compiling in release
+ mode */
+ #define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+ enum bt_type
+ {
+ btt_free, /* free resource segment */
+ btt_live /* allocated resource segment */
+ } type;
+
+ unsigned int is_leftmost;
+ unsigned int is_rightmost;
+ unsigned int free_import;
+
+ /* The base resource and extent of this segment */
+ RA_BASE_T base;
+ RA_LENGTH_T uSize;
+
+ /* doubly linked ordered list of all segments within the arena */
+ struct _BT_ *pNextSegment;
+ struct _BT_ *pPrevSegment;
+
+ /* doubly linked un-ordered list of free segments with the same flags. */
+ struct _BT_ * next_free;
+ struct _BT_ * prev_free;
+
+ /* a user reference associated with this span, user references are
+ * currently only provided in the callback mechanism */
+ IMG_HANDLE hPriv;
+
+ /* Flags to match on this span */
+ IMG_UINT32 uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+ /* arena name for diagnostics output */
+ IMG_CHAR *name;
+
+ /* allocations within this arena are quantum sized */
+ RA_LENGTH_T uQuantum;
+
+ /* import interface, if provided */
+ PVRSRV_ERROR (*pImportAlloc)(RA_PERARENA_HANDLE h,
+ RA_LENGTH_T uSize,
+ IMG_UINT32 uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv);
+ void (*pImportFree) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE hPriv);
+
+ /* arbitrary handle provided by arena owner to be passed into the
+ * import alloc and free hooks */
+ void *pImportHandle;
+
+ IMG_PSPLAY_TREE per_flags_buckets;
+
+ /* resource segment list */
+ BT *pHeadSegment;
+
+ /* segment address to boundary tag hash table */
+ HASH_TABLE *pSegmentHash;
+
+ /* Lock for this arena */
+ POS_LOCK hLock;
+
+ /* LockClass of this arena. This is used within lockdep to decide if a
+ * recursive call sequence with the same lock class is allowed or not. */
+ IMG_UINT32 ui32LockClass;
+
+ /* If TRUE, imports will not be split up. Allocations will always get their
+ * own import
+ */
+ IMG_BOOL bNoSplit;
+};
+
+/*************************************************************************/ /*!
+@Function _RequestAllocFail
+@Description Default callback allocator used if no callback is
+ specified, always fails to allocate further resources to the
+ arena.
+@Input _h - callback handle
+@Input _uSize - requested allocation size
+@Input _uFlags - allocation flags
+@Input _pszAnnotation - annotation string
+@Output _pBase - receives the allocated base
+@Output _pActualSize - actual allocation size
+@Output _phPriv - user reference
+@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails to allocate.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RequestAllocFail (RA_PERARENA_HANDLE _h,
+ RA_LENGTH_T _uSize,
+ IMG_UINT32 _uFlags,
+ const IMG_CHAR *_pszAnnotation,
+ RA_BASE_T *_pBase,
+ RA_LENGTH_T *_pActualSize,
+ RA_PERISPAN_HANDLE *_phPriv)
+{
+ PVR_UNREFERENCED_PARAMETER (_h);
+ PVR_UNREFERENCED_PARAMETER (_uSize);
+ PVR_UNREFERENCED_PARAMETER (_pActualSize);
+ PVR_UNREFERENCED_PARAMETER (_phPriv);
+ PVR_UNREFERENCED_PARAMETER (_uFlags);
+ PVR_UNREFERENCED_PARAMETER (_pBase);
+ PVR_UNREFERENCED_PARAMETER (_pszAnnotation);
+
+ return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
+}
+
+
+#if defined (PVR_CTZLL)
+ /* make sure to trigger an error if someone changes the buckets or the bHasEltsMapping size.
+ bHasEltsMapping is used to quickly determine the smallest bucket containing elements,
+ therefore it must have at least as many bits as the buckets array has buckets. The RA
+ implementation actually uses one more bit. */
+ static_assert((sizeof(((IMG_PSPLAY_TREE) 0)->buckets) / sizeof(((IMG_PSPLAY_TREE) 0)->buckets[0]))
+ < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
+ "Too many buckets for bHasEltsMapping bitmap");
+#endif
+
+
+/*************************************************************************/ /*!
+@Function pvr_log2
+@Description Computes the floor of the log base 2 of an unsigned integer
+@Input n Unsigned integer
+@Return Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(PVR_CLZLL)
+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type,
+ since __builtin_clzll is for unsigned long long variables.
+
+ if someone changes RA_LENGTH_T to unsigned long, then use __builtin_clzl;
+ if it changes to unsigned int, use __builtin_clz;
+
+ if it changes to something bigger than unsigned long long,
+ then revert pvr_log2 to the classic implementation */
+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
+ "RA log routines not tuned for sizeof(RA_LENGTH_T)");
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+ PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+ return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
+}
+#else
+static IMG_UINT32
+pvr_log2 (RA_LENGTH_T n)
+{
+ IMG_UINT32 l = 0;
+
+ PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+ n>>=1;
+ while (n>0)
+ {
+ n>>=1;
+ l++;
+ }
+ return l;
+}
+#endif
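+
+/* Worked example (illustrative only): pvr_log2(1) == 0, pvr_log2(4096) == 12
+ * and pvr_log2(6000) == 12, so a free segment of 6000 bytes lives in free
+ * table bucket 12, which holds all free segments whose size lies in the
+ * range 2^12 .. 2^13 - 1, as described at the top of this file.
+ */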
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function _IsInSegmentList
+@Description Tests if a BT is in the segment list.
+@Input pArena The arena.
+@Input pBT The boundary tag to look for.
+@Return IMG_FALSE BT was not in the arena's segment list.
+ IMG_TRUE BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList (RA_ARENA *pArena,
+ BT *pBT)
+{
+ BT* pBTScan;
+
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (pBT != NULL);
+
+ /* Walk the segment list until we see the BT pointer... */
+ pBTScan = pArena->pHeadSegment;
+ while (pBTScan != NULL && pBTScan != pBT)
+ {
+ pBTScan = pBTScan->pNextSegment;
+ }
+
+ /* Test if we found it and then return */
+ return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function _IsInFreeList
+@Description Tests if a BT is in the free list.
+@Input pArena The arena.
+@Input pBT The boundary tag to look for.
+@Return IMG_FALSE BT was not in the arena's free list.
+ IMG_TRUE BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList (RA_ARENA *pArena,
+ BT *pBT)
+{
+ BT* pBTScan;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (pBT != NULL);
+
+ /* Look for the free list that holds BTs of this size... */
+ uIndex = pvr_log2 (pBT->uSize);
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags))
+ {
+ return 0;
+ }
+ else
+ {
+ pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+ while (pBTScan != NULL && pBTScan != pBT)
+ {
+ pBTScan = pBTScan->next_free;
+ }
+
+ /* Test if we found it and then return */
+ return (pBTScan == pBT);
+ }
+}
+
+/* is_arena_valid should only be used in debug mode.
+ It checks that the properties an arena must have actually hold. */
+static int is_arena_valid(struct _RA_ARENA_ * arena)
+{
+ struct _BT_ * chunk;
+#if defined(PVR_CTZLL)
+ unsigned int i;
+#endif
+
+ for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+ {
+ /* if next segment is NULL, then it must be a rightmost */
+ PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+ /* if prev segment is NULL, then it must be a leftmost */
+ PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+ if (chunk->type == btt_free)
+ {
+ /* checks the correctness of the type field */
+ PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+ /* check that there can't be two consecutive free chunks.
+ Indeed, instead of having two consecutive free chunks,
+ there should be only one that spans the size of the two. */
+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+ }
+ else
+ {
+ /* checks the correctness of the type field */
+ PVR_ASSERT(!_IsInFreeList(arena, chunk));
+ }
+
+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+ /* all segments of the same import must have the same flags ... */
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+ /* ... and the same import handle */
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+ /* if a free chunk spans a whole import, then it must be a 'not to free' import.
+ Otherwise it would have been freed. */
+ PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+ }
+
+#if defined(PVR_CTZLL)
+ if (arena->per_flags_buckets != NULL)
+ {
+ for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+ {
+ /* verify that the bHasEltsMapping is correct for this flags bucket */
+ PVR_ASSERT(
+ ((arena->per_flags_buckets->buckets[i] == NULL) &&
+ (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+ ||
+ ((arena->per_flags_buckets->buckets[i] != NULL) &&
+ (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+ );
+ }
+ }
+#endif
+
+ /* if arena was not valid, one of the assert before should have triggered */
+ return 1;
+}
+#endif
+/*************************************************************************/ /*!
+@Function _SegmentListInsertAfter
+@Description Insert a boundary tag into an arena segment list after a
+ specified boundary tag.
+@Input pInsertionPoint The insertion point.
+@Input pBT The boundary tag to insert.
+@Return PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsertAfter (BT *pInsertionPoint,
+ BT *pBT)
+{
+ PVR_ASSERT (pBT != NULL);
+ PVR_ASSERT (pInsertionPoint != NULL);
+
+ pBT->pNextSegment = pInsertionPoint->pNextSegment;
+ pBT->pPrevSegment = pInsertionPoint;
+ if (pInsertionPoint->pNextSegment != NULL)
+ {
+ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+ }
+ pInsertionPoint->pNextSegment = pBT;
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _SegmentListInsert
+@Description Insert a boundary tag into an arena segment list
+@Input pArena The arena.
+@Input pBT The boundary tag to insert.
+@Return PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_ASSERT (!_IsInSegmentList(pArena, pBT));
+
+ /* insert into the segment chain */
+ pBT->pNextSegment = pArena->pHeadSegment;
+ pArena->pHeadSegment = pBT;
+ if (pBT->pNextSegment != NULL)
+ {
+ pBT->pNextSegment->pPrevSegment = pBT;
+ }
+
+ pBT->pPrevSegment = NULL;
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _SegmentListRemove
+@Description Remove a boundary tag from an arena segment list.
+@Input pArena The arena.
+@Input pBT The boundary tag to remove.
+*/ /**************************************************************************/
+static void
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ PVR_ASSERT (_IsInSegmentList(pArena, pBT));
+
+ if (pBT->pPrevSegment == NULL)
+ pArena->pHeadSegment = pBT->pNextSegment;
+ else
+ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+ if (pBT->pNextSegment != NULL)
+ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function _BuildBT
+@Description Construct a boundary tag for a free segment.
+@Input base The base of the resource segment.
+@Input uSize The extent of the resource segment.
+@Input uFlags The flags to give to the boundary tag
+@Return Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT (RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags
+ )
+{
+ BT *pBT;
+
+ pBT = OSAllocMem(sizeof(BT));
+ if (pBT == NULL)
+ {
+ return NULL;
+ }
+
+ OSCachedMemSet(pBT, 0, sizeof(BT));
+
+ pBT->is_leftmost = 1;
+ pBT->is_rightmost = 1;
+ pBT->type = btt_live;
+ pBT->base = base;
+ pBT->uSize = uSize;
+ pBT->uFlags = uFlags;
+ pBT->free_import = 0;
+
+ return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function _SegmentSplit
+@Description Split a segment into two, maintaining the arena segment list.
+ The boundary tag should not be in the free table. Neither the
+ original nor the new neighbour boundary tag will be in the free
+ table.
+@Input pBT The boundary tag to split.
+@Input uSize The required segment size of boundary tag after
+ splitting.
+@Return New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit (BT *pBT, RA_LENGTH_T uSize)
+{
+ BT *pNeighbour;
+
+ pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+ if (pNeighbour == NULL)
+ {
+ return NULL;
+ }
+
+ _SegmentListInsertAfter(pBT, pNeighbour);
+
+ pNeighbour->is_leftmost = 0;
+ pNeighbour->is_rightmost = pBT->is_rightmost;
+ pNeighbour->free_import = pBT->free_import;
+ pBT->is_rightmost = 0;
+ pNeighbour->hPriv = pBT->hPriv;
+ pBT->uSize = uSize;
+ pNeighbour->uFlags = pBT->uFlags;
+
+ return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function _FreeListInsert
+@Description Insert a boundary tag into an arena free table.
+@Input pArena The arena.
+@Input pBT The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+ pBT->type = btt_free;
+
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+ /* the flags item in the splay tree must have been created beforehand by
+ _InsertResource */
+ PVR_ASSERT(pArena->per_flags_buckets != NULL);
+ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+ pBT->next_free = pArena->per_flags_buckets->buckets[uIndex];
+ if (pBT->next_free != NULL)
+ {
+ pBT->next_free->prev_free = pBT;
+ }
+ pBT->prev_free = NULL;
+ pArena->per_flags_buckets->buckets[uIndex] = pBT;
+
+#if defined(PVR_CTZLL)
+ /* record that buckets[uIndex] now contains elements */
+ pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function _FreeListRemove
+@Description Remove a boundary tag from an arena free table.
+@Input pArena The arena.
+@Input pBT The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+ PVR_ASSERT (_IsInFreeList(pArena, pBT));
+
+ if (pBT->next_free != NULL)
+ {
+ pBT->next_free->prev_free = pBT->prev_free;
+ }
+
+ if (pBT->prev_free != NULL)
+ {
+ pBT->prev_free->next_free = pBT->next_free;
+ }
+ else
+ {
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+ /* the flags item in the splay tree must have already been created
+ (otherwise how could there be a segment with these flags?) */
+ PVR_ASSERT(pArena->per_flags_buckets != NULL);
+ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+ pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(PVR_CTZLL)
+ if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+ {
+ /* there are no more elements in this bucket. Update the mapping. */
+ pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+ }
+#endif
+ }
+
+
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+ pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function _InsertResource
+@Description Add a free resource segment to an arena.
+@Input pArena The arena.
+@Input base The base of the resource segment.
+@Input uSize The extent of the resource segment.
+@Input uFlags The flags of the new resources.
+@Return New bucket pointer
+ NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags
+ )
+{
+ BT *pBT;
+ PVR_ASSERT (pArena!=NULL);
+
+ pBT = _BuildBT (base, uSize, uFlags);
+
+ if (pBT != NULL)
+ {
+ IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+ if (tmp == NULL)
+ {
+ OSFreeMem(pBT);
+ return NULL;
+ }
+
+ pArena->per_flags_buckets = tmp;
+ _SegmentListInsert (pArena, pBT);
+ _FreeListInsert (pArena, pBT);
+ }
+ return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function _InsertResourceSpan
+@Description Add a free resource span to an arena, marked for free_import.
+@Input pArena The arena.
+@Input base The base of the resource segment.
+@Input uSize The extent of the resource segment.
+@Return The boundary tag representing the free resource segment,
+ or NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags)
+{
+ BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+ if (pBT != NULL)
+ {
+ pBT->free_import = 1;
+ }
+ return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function _RemoveResourceSpan
+@Description Frees a resource span from an arena, returning the imported
+ span via the callback.
+@Input pArena The arena.
+@Input pBT The boundary tag to free.
+@Return IMG_FALSE failure - span was still in use
+ IMG_TRUE success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan (RA_ARENA *pArena, BT *pBT)
+{
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (pBT!=NULL);
+
+ if (pBT->free_import &&
+ pBT->is_leftmost &&
+ pBT->is_rightmost)
+ {
+ _SegmentListRemove (pArena, pBT);
+ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->hPriv);
+ OSFreeMem(pBT);
+
+ return IMG_TRUE;
+ }
+
+
+ return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+@Function _FreeBT
+@Description Free a boundary tag taking care of the segment list and the
+ boundary tag free table.
+@Input pArena The arena.
+@Input pBT The boundary tag to free.
+*/ /**************************************************************************/
+static void
+_FreeBT (RA_ARENA *pArena, BT *pBT)
+{
+ BT *pNeighbour;
+
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (pBT!=NULL);
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+ /* try and coalesce with left neighbour */
+ pNeighbour = pBT->pPrevSegment;
+ if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free))
+ {
+ /* Sanity check. */
+ PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->base = pNeighbour->base;
+
+ pBT->uSize += pNeighbour->uSize;
+ pBT->is_leftmost = pNeighbour->is_leftmost;
+ OSFreeMem(pNeighbour);
+ }
+
+ /* try to coalesce with right neighbour */
+ pNeighbour = pBT->pNextSegment;
+ if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+ {
+ /* sanity check */
+ PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->uSize += pNeighbour->uSize;
+ pBT->is_rightmost = pNeighbour->is_rightmost;
+ OSFreeMem(pNeighbour);
+ }
+
+ if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+ {
+ _FreeListInsert (pArena, pBT);
+ PVR_ASSERT( (!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import) );
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+}
+
+
+/*
+ This function returns the first element in a bucket that can be split
+ in a way that one of the resulting subsegments meets the size and
+ alignment criteria.
+
+ first_elt is the bucket to look into. Remember that a bucket is
+ implemented as a pointer to the first element of the linked list.
+
+ nb_max_try limits the number of elements considered, i.e. only the
+ first nb_max_try elements of the free list are examined. The special
+ value ~0 means unlimited, i.e. consider all elements in the free
+ list.
+ */
+static INLINE
+struct _BT_ * find_chunk_in_bucket(struct _BT_ * first_elt,
+ RA_LENGTH_T uSize,
+ RA_LENGTH_T uAlignment,
+ unsigned int nb_max_try)
+{
+ struct _BT_ * walker;
+
+ for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+ {
+ const RA_BASE_T aligned_base = (uAlignment > 1) ?
+ (walker->base + uAlignment - 1) & ~(uAlignment - 1)
+ : walker->base;
+
+ if (walker->base + walker->uSize >= aligned_base + uSize)
+ {
+ return walker;
+ }
+
+ /* 0xFFFF...FFFF is used as nb_max_try = infinity. */
+ if (nb_max_try != (unsigned int) ~0)
+ {
+ nb_max_try--;
+ }
+ }
+
+ return NULL;
+}
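+
+/* Worked example (illustrative only) of the suitability test above: for a
+ * free segment with base 0x1004 and uSize 0x200, a request for uSize 0x100
+ * with uAlignment 0x100 gives
+ *
+ *   aligned_base = (0x1004 + 0x100 - 1) & ~(0x100 - 1) = 0x1100
+ *
+ * and the segment is suitable because 0x1004 + 0x200 >= 0x1100 + 0x100.
+ */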
+
+
+/*************************************************************************/ /*!
+@Function _AttemptAllocAligned
+@Description Attempt an allocation from an arena.
+@Input pArena The arena.
+@Input uSize The requested allocation size.
+@Output phPriv The user references associated with
+ the imported segment. (optional)
+@Input flags Allocation flags
+@Input uAlignment Required uAlignment, or 0.
+ Must be a power of 2 if not 0
+@Output base Allocated resource base (non optional, must not be NULL)
+@Return IMG_FALSE failure
+ IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+ RA_LENGTH_T uSize,
+ IMG_UINT32 uFlags,
+ RA_LENGTH_T uAlignment,
+ RA_BASE_T *base,
+ RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+ IMG_UINT32 index_low;
+ IMG_UINT32 index_high;
+ IMG_UINT32 i;
+ struct _BT_ * pBT = NULL;
+ RA_BASE_T aligned_base;
+
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (base != NULL);
+
+ pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags))
+ {
+ /* no chunks with these flags. */
+ return IMG_FALSE;
+ }
+
+ index_low = pvr_log2(uSize);
+ index_high = pvr_log2(uSize + uAlignment - 1);
+
+ PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+ PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+ PVR_ASSERT(index_low <= index_high);
+
+#if defined(PVR_CTZLL)
+ i = PVR_CTZLL((IMG_ELTS_MAPPINGS) (~((1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+ for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+ {
+ }
+#endif
+ PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+ if (i != FREE_TABLE_LIMIT)
+ {
+ /* since we start at index_high + 1, the first chunk is guaranteed to be large enough */
+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+ }
+ else
+ {
+ for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+ {
+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);
+ }
+ }
+
+ if (pBT == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+ _FreeListRemove (pArena, pBT);
+
+ if (pArena->bNoSplit)
+ {
+ goto nosplit;
+ }
+
+ /* with uAlignment we might need to discard the front of this segment */
+ if (aligned_base > pBT->base)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+ /* partition the buffer, create a new boundary tag */
+ if (pNeighbour == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __FUNCTION__));
+ /* Put pBT back in the list */
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert(pArena, pBT);
+ pBT = pNeighbour;
+ }
+
+ /* the segment might be too big, if so, discard the back of the segment */
+ if (pBT->uSize > uSize)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit(pBT, uSize);
+ /* partition the buffer, create a new boundary tag */
+ if (pNeighbour == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __FUNCTION__));
+ /* Put pBT back in the list */
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert (pArena, pNeighbour);
+ }
+nosplit:
+ pBT->type = btt_live;
+
+ if (!HASH_Insert_Extended (pArena->pSegmentHash, &pBT->base, (uintptr_t)pBT))
+ {
+ _FreeBT (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ if (phPriv != NULL)
+ *phPriv = pBT->hPriv;
+
+ *base = pBT->base;
+
+ return IMG_TRUE;
+}
+
+
+
+/*************************************************************************/ /*!
+@Function RA_Create
+@Description To create a resource arena.
+@Input name The name of the arena for diagnostic purposes.
+@Input uLog2Quantum The log2 of the arena allocation quantum.
+@Input ui32LockClass The lock class level this arena uses.
+@Input imp_alloc A resource allocation callback or 0.
+@Input imp_free A resource de-allocation callback or 0.
+@Input arena_handle Handle passed to alloc and free or 0.
+@Input bNoSplit Disable splitting up imports.
+@Return arena handle, or NULL.
+*/ /**************************************************************************/
+IMG_INTERNAL RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ RA_LOG2QUANTUM_T uLog2Quantum,
+ IMG_UINT32 ui32LockClass,
+ PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE h,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T _flags,
+ const IMG_CHAR *pszAnnotation,
+ /* returned data */
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv),
+ void (*imp_free) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE),
+ RA_PERARENA_HANDLE arena_handle,
+ IMG_BOOL bNoSplit)
+{
+ RA_ARENA *pArena;
+ PVRSRV_ERROR eError;
+
+ if (name == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "RA_Create: invalid parameter 'name' (NULL not accepted)"));
+ return NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Create: name='%s'", name));
+
+ pArena = OSAllocMem(sizeof (*pArena));
+ if (pArena == NULL)
+ {
+ goto arena_fail;
+ }
+
+ eError = OSLockCreate(&pArena->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto lock_fail;
+ }
+
+ pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+ if (pArena->pSegmentHash==NULL)
+ {
+ goto hash_fail;
+ }
+
+ pArena->name = name;
+ pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail;
+ pArena->pImportFree = imp_free;
+ pArena->pImportHandle = arena_handle;
+ pArena->pHeadSegment = NULL;
+ pArena->uQuantum = IMG_UINT64_C(1) << uLog2Quantum;
+ pArena->per_flags_buckets = NULL;
+ pArena->ui32LockClass = ui32LockClass;
+ pArena->bNoSplit = bNoSplit;
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ return pArena;
+
+hash_fail:
+ OSLockDestroy(pArena->hLock);
+lock_fail:
+ OSFreeMem(pArena);
+ /*not nulling pointer, out of scope*/
+arena_fail:
+ return NULL;
+}
+
+/*************************************************************************/ /*!
+@Function RA_Delete
+@Description To delete a resource arena. All resources allocated from
+ the arena must be freed before deleting the arena.
+@Input pArena The arena to delete.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Delete (RA_ARENA *pArena)
+{
+ IMG_UINT32 uIndex;
+ IMG_BOOL bWarn = IMG_TRUE;
+
+ PVR_ASSERT(pArena != NULL);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+ return;
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Delete: name='%s'", pArena->name));
+
+ while (pArena->pHeadSegment != NULL)
+ {
+ BT *pBT = pArena->pHeadSegment;
+
+ if (pBT->type != btt_free)
+ {
+ if (bWarn)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
+ (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
+ bWarn = IMG_FALSE;
+ }
+ }
+ else
+ {
+ _FreeListRemove(pArena, pBT);
+ }
+
+ _SegmentListRemove (pArena, pBT);
+ OSFreeMem(pBT);
+ /*not nulling original pointer, it has changed*/
+ }
+
+ while (pArena->per_flags_buckets != NULL)
+ {
+ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+ {
+ PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
+ }
+
+ pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets);
+ }
+
+ HASH_Delete (pArena->pSegmentHash);
+ OSLockDestroy(pArena->hLock);
+ OSFreeMem(pArena);
+ /*not nulling pointer, copy on stack*/
+}
+
+/*************************************************************************/ /*!
+@Function RA_Add
+@Description To add a resource span to an arena. The span must not
+ overlap with any span previously added to the arena.
+@Input pArena The arena to add a span into.
+@Input base The base of the span.
+@Input uSize The extent of the span.
+@Input uFlags The flags of the new import.
+@Input hPriv A private handle associated with the span (reserved for the user).
+@Return IMG_TRUE - Success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ RA_PERISPAN_HANDLE hPriv)
+{
+ struct _BT_* bt;
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (uSize != 0);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+ return IMG_FALSE;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Add: name='%s', "
+ "base=0x%llx, size=0x%llx", pArena->name,
+ (unsigned long long)base, (unsigned long long)uSize));
+
+ uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+ bt = _InsertResource(pArena, base, uSize, uFlags);
+ if (bt != NULL)
+ {
+ bt->hPriv = hPriv;
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ OSLockRelease(pArena->hLock);
+
+ return bt != NULL;
+}
+
+/*************************************************************************/ /*!
+@Function RA_Alloc
+@Description To allocate resource from an arena.
+@Input pArena The arena
+@Input uRequestSize The size of resource segment requested.
+@Input uImportMultiplier Import x-times more for future requests if
+ we have to import new memory.
+@Output pActualSize The actual size of resource segment
+ allocated, typically rounded up to the arena quantum.
+@Output phPriv The user reference associated with allocated resource span.
+@Input uImportFlags Flags influencing allocation policy.
+@Input uAlignment The uAlignment constraint required for the
+ allocated segment, use 0 if uAlignment not required, otherwise
+ must be a power of 2.
+@Output base Allocated base resource
+@Return PVRSRV_OK - success
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+ RA_LENGTH_T uRequestSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uImportFlags,
+ RA_LENGTH_T uAlignment,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *base,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bResult;
+ RA_LENGTH_T uSize = uRequestSize;
+ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+ if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: One of the necessary parameters is 0"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ if (pActualSize != NULL)
+ {
+ *pActualSize = uSize;
+ }
+
+ /* Must be a power of 2 or 0 */
+ PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: arena='%s', size=0x%llx(0x%llx), "
+ "alignment=0x%llx", pArena->name,
+ (unsigned long long)uSize, (unsigned long long)uRequestSize,
+ (unsigned long long)uAlignment));
+
+ /* if allocation failed then we might have an import source which
+ can provide more resource, else we will have to fail the
+ allocation to the caller. */
+ bResult = _AttemptAllocAligned (pArena, uSize, uFlags, uAlignment, base, phPriv);
+ if (!bResult)
+ {
+ IMG_HANDLE hPriv;
+ RA_BASE_T import_base;
+ RA_LENGTH_T uImportSize = uSize;
+
+ /*
+ Ensure that we allocate sufficient space to meet the uAlignment
+ constraint
+ */
+ if (uAlignment > pArena->uQuantum)
+ {
+ uImportSize += (uAlignment - pArena->uQuantum);
+ }
+
+ /* apply over-allocation multiplier after all alignment adjustments */
+ uImportSize *= uImportMultiplier;
+
+ /* ensure that we import according to the quanta of this arena */
+ uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
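+ /* Worked example (illustrative values): with uSize=0x1000,
+ uAlignment=0x4000, uQuantum=0x1000 and uImportMultiplier=1,
+ uImportSize grows to 0x1000 + (0x4000 - 0x1000) = 0x4000, is
+ multiplied by 1, and is already a multiple of the quantum, so
+ 0x4000 is requested from the import callback. */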
+
+ eError = pArena->pImportAlloc (pArena->pImportHandle,
+ uImportSize, uImportFlags,
+ pszAnnotation,
+ &import_base, &uImportSize,
+ &hPriv);
+ if (PVRSRV_OK != eError)
+ {
+ OSLockRelease(pArena->hLock);
+ return eError;
+ }
+ else
+ {
+ BT *pBT;
+ pBT = _InsertResourceSpan (pArena, import_base, uImportSize, uFlags);
+ /* successfully import more resource, create a span to
+ represent it and retry the allocation attempt */
+ if (pBT == NULL)
+ {
+ /* insufficient resources to insert the newly acquired span,
+ so free it back again */
+ pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', "
+ "size=0x%llx failed!", pArena->name,
+ (unsigned long long)uSize));
+ /* RA_Dump (arena); */
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
+ }
+
+ pBT->hPriv = hPriv;
+
+ bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+ if (!bResult)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: name='%s' second alloc failed!",
+ pArena->name));
+
+ /*
+ On failure of _AttemptAllocAligned() depending on the exact point
+ of failure, the imported segment may have been used and freed, or
+ left untouched. If the latter, we need to return it.
+ */
+ _FreeBT(pArena, pBT);
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+ }
+ else
+ {
+ /* Check if the new allocation was in the span we just added... */
+ if (*base < import_base || *base > (import_base + uImportSize))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: name='%s' alloc did not occur in the imported span!",
+ pArena->name));
+
+ /*
+ Remove the imported span which should not be in use (if it is then
+ that is okay, but essentially no span should exist that is not used).
+ */
+ _FreeBT(pArena, pBT);
+ }
+ }
+ }
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', size=0x%llx, "
+ "*base=0x%llx = %d",pArena->name, (unsigned long long)uSize,
+ (unsigned long long)*base, bResult));
+
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_OK;
+}
+
+
+
+
+/*************************************************************************/ /*!
+@Function RA_Free
+@Description To free a resource segment.
+@Input pArena The arena the segment was originally allocated from.
+@Input base The base of the resource span to free.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base)
+{
+ BT *pBT;
+
+ PVR_ASSERT (pArena != NULL);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+ return;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Free: name='%s', base=0x%llx", pArena->name,
+ (unsigned long long)base));
+
+ pBT = (BT *) HASH_Remove_Extended (pArena->pSegmentHash, &base);
+ PVR_ASSERT (pBT != NULL);
+
+ if (pBT)
+ {
+ PVR_ASSERT (pBT->base == base);
+ _FreeBT (pArena, pBT);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RA_Free: no resource span found for given base (0x%llX) in arena %s",
+ (unsigned long long) base,
+ pArena->name));
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ OSLockRelease(pArena->hLock);
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Allocator API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/** Resource arena.
+ * struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is given to the RA at
+ * RA_Create time, and the RA promises to pass it to calls to the
+ * ImportAlloc and ImportFree callbacks.
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is given to the RA on a
+ * per-import basis, either for the "initial" import at RA_Create time
+ * or for further imports via the ImportAlloc callback. The RA hands it
+ * back via the ImportFree callback, and also provides it in answer to
+ * any RA_Alloc request to signify from which "import" the allocation came.
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describes the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+#define RA_NO_IMPORT_MULTIPLIER 1
+
+/*
+ * Flags in an "import" must match the flags for an allocation.
+ */
+typedef IMG_UINT32 RA_FLAGS_T;
+
+/**
+ * @Function RA_Create
+ *
+ * @Description
+ *
+ * To create a resource arena.
+ *
+ * @Input name - the name of the arena for diagnostic purposes.
+ * @Input uLog2Quantum - the log2 of the arena allocation quantum.
+ * @Input ui32LockClass - the lock class level this arena uses.
+ * @Input imp_alloc - a resource allocation callback or 0.
+ * @Input imp_free - a resource de-allocation callback or 0.
+ * @Input per_arena_handle - user private handle passed to alloc and free or 0.
+ * @Input bNoSplit - Disable splitting up imports.
+ * @Return pointer to arena, or NULL.
+ */
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ /* subsequent imports: */
+ RA_LOG2QUANTUM_T uLog2Quantum,
+ IMG_UINT32 ui32LockClass,
+ PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE _h,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv),
+ void (*imp_free) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE),
+ RA_PERARENA_HANDLE per_arena_handle,
+ IMG_BOOL bNoSplit);
+
+/**
+ * @Function RA_Delete
+ *
+ * @Description
+ *
+ * To delete a resource arena. All resources allocated from the arena
+ * must be freed before deleting the arena.
+ *
+ * @Input pArena - the arena to delete.
+ * @Return None
+ */
+void
+RA_Delete (RA_ARENA *pArena);
+
+/**
+ * @Function RA_Add
+ *
+ * @Description
+ *
+ * To add a resource span to an arena. The span must not overlap with
+ * any span previously added to the arena.
+ *
+ * @Input pArena - the arena to add a span into.
+ * @Input base - the base of the span.
+ * @Input uSize - the extent of the span.
+ * @Input uFlags - the flags of the new import.
+ * @Input hPriv - handle associated with the span (reserved for user use)
+ * @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ RA_PERISPAN_HANDLE hPriv);
+
+/**
+ * @Function RA_Alloc
+ *
+ * @Description
+ *
+ * To allocate resource from an arena.
+ *
+ * @Input pArena - the arena
+ * @Input uRequestSize - the size of resource segment requested.
+ * @Input uImportMultiplier - import x times the uRequestSize
+ * for future RA_Alloc calls.
+ * Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
+ * @Output pActualSize - the actual size of resource segment allocated,
+ * typically rounded up to the arena quantum.
+ * @Input uImportFlags - flags influencing allocation policy.
+ * @Input uAlignment - the alignment constraint required for the
+ * allocated segment, use 0 if alignment not required.
+ * @Input pszAnnotation - a string to describe the allocation
+ * @Output pBase - allocated base resource
+ * @Output phPriv - the user reference associated with allocated
+ * resource span.
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+ RA_LENGTH_T uSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uFlags,
+ RA_LENGTH_T uAlignment,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ * @Function RA_Free
+ *
+ * @Description To free a resource segment.
+ *
+ * @Input pArena - the arena the segment was originally allocated from.
+ * @Input base - the base of the resource span to free.
+ *
+ * @Return None
+ */
+void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base);
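+
+/*
+ * Minimal usage sketch (illustrative only): the arena name, quantum, span
+ * and sizes below are arbitrary example values, the import callbacks are
+ * omitted (NULL), and all error handling is elided.
+ *
+ *   RA_ARENA *pArena;
+ *   RA_BASE_T uiBase;
+ *
+ *   pArena = RA_Create("example", 12, RA_LOCKCLASS_0,
+ *                      NULL, NULL, NULL, IMG_FALSE);
+ *   RA_Add(pArena, 0x80000000, 0x100000, 0, NULL);
+ *   RA_Alloc(pArena, 0x1000, RA_NO_IMPORT_MULTIPLIER, 0, 0x1000,
+ *            "example alloc", &uiBase, NULL, NULL);
+ *   RA_Free(pArena, uiBase);
+ *   RA_Delete(pArena);
+ */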
+
+#endif
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the rgx Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_BRIDGE_H__)
+#define __RGX_BRIDGE_H__
+
+#include "pvr_bridge.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif.h"
+
+#define RGXFWINITPARAMS_VERSION 1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+/* Parameters for RGXFirmwareInit */
+typedef struct __RGX_FW_INIT_IN_PARAMS__
+{
+ IMG_UINT32 ui32Version;
+ IMG_BOOL bEnableSignatureChecks;
+ IMG_UINT32 ui32SignatureChecksBufSize;
+ IMG_UINT32 ui32HWPerfFWBufSizeKB;
+ IMG_UINT64 ui64HWPerfFilter;
+ IMG_UINT32 ui32ConfigFlags;
+ IMG_UINT32 ui32LogType;
+ IMG_UINT32 ui32FilterFlags;
+ IMG_UINT32 ui32JonesDisableMask;
+ IMG_UINT32 ui32HWRDebugDumpLimit;
+ RGXFWIF_COMPCHECKS_BVNC sClientBVNC;
+ RGXFWIF_COMPCHECKS_BVNC sFirmwareBVNC;
+ IMG_UINT32 ui32HWPerfCountersDataSize;
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf;
+ FW_PERF_CONF eFirmwarePerf;
+ /* Available for future extensions */
+ IMG_BYTE abUnused[RGXFWINITPARAMS_EXTENSION];
+} RGX_FW_INIT_IN_PARAMS;
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+#include "common_rgxinit_bridge.h"
+#endif
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+
+#include "common_rgxtq2_bridge.h"
+#include "common_rgxtq_bridge.h"
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#include "common_breakpoint_bridge.h"
+#endif
+#include "common_debugmisc_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#include "common_rgxray_bridge.h"
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#include "common_regconfig_bridge.h"
+#endif
+#include "common_timerquery_bridge.h"
+#include "common_rgxkicksync_bridge.h"
+
+#include "common_rgxsignals_bridge.h"
+
+
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST
+ * offsets follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 129, even if the feature is
+ * not defined). If an optional bridge group is not defined you must
+ * still define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an
+ * assigned value of 0.
+ */
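+
+/* Purely hypothetical example of adding a new optional group following the
+ * pattern above (the group name and feature macro are invented for
+ * illustration only):
+ *
+ *   #define PVRSRV_BRIDGE_RGXFOO                142UL
+ *   #if defined(SUPPORT_RGXFOO)
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXFOO_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFOO_CMD_LAST)
+ *   #else
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_FIRST 0
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST)
+ *   #endif
+ *
+ * remembering to move PVRSRV_BRIDGE_RGX_LAST and
+ * PVRSRV_BRIDGE_RGX_DISPATCH_LAST on to the new group.
+ */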
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than follow-on from the other
+ * non-device bridge groups (meaning that they then won't be displaced if
+ * other non-device bridge groups are added)
+ */
+
+#define PVRSRV_BRIDGE_RGX_FIRST 128UL
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ 128UL
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP 129UL
+# define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+# define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+
+/* 130: RGX Initialisation interface functions */
+#define PVRSRV_BRIDGE_RGXINIT 130UL
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+# define PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST +1)
+# define PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXINIT_CMD_LAST)
+#else
+# define PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST 0
+# define PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST)
+#endif
+
+/* 131: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D 131UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 132: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_BREAKPOINT 132UL
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)
+#endif
+
+/* 133: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_DEBUGMISC 133UL
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST + PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST)
+
+/* 134: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP 134UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST)
+#endif
+
+/* 135: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF 135UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 136: RGX Ray Tracing interface functions */
+#define PVRSRV_BRIDGE_RGXRAY 136UL
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST)
+
+/* 137: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_REGCONFIG 137UL
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_REGCONFIG_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST)
+#endif
+
+/* 138: RGX Timer Query interface functions */
+#define PVRSRV_BRIDGE_TIMERQUERY 138UL
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST)
+
+/* 139: RGX kicksync interface */
+#define PVRSRV_BRIDGE_RGXKICKSYNC 139UL
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
+
+/* 140: RGX signals interface */
+#define PVRSRV_BRIDGE_RGXSIGNALS 140UL
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST)
+
+
+#define PVRSRV_BRIDGE_RGXTQ2 141UL
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST)
+
+#define PVRSRV_BRIDGE_RGX_LAST (PVRSRV_BRIDGE_RGXTQ2)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST)
+
+/* bit mask representing the enabled RGX bridges */
+
+static const IMG_UINT32 gui32RGXBridges =
+ (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_COMPUTE)
+ | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ | (1U << (PVRSRV_BRIDGE_RGXINIT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_BREAKPOINT)
+ | (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_DEBUGMISC)
+ | (1U << (PVRSRV_BRIDGE_DEBUGMISC - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_RAY_TRACING)
+ | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_REGCONFIG)
+ | (1U << (PVRSRV_BRIDGE_REGCONFIG - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_TIMERQUERY)
+ | (1U << (PVRSRV_BRIDGE_TIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_SIGNAL_SNOOPING)
+ | (1U << (PVRSRV_BRIDGE_RGXSIGNALS - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST));
+
+/* bit field representing which RGX bridge groups may optionally not
+ * be present in the server
+ */
+
+#define RGX_BRIDGES_OPTIONAL \
+ ( \
+ 0 /* no RGX bridges are currently optional */ \
+ )
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGX_BRIDGE_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Common Types and Defines Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common types and definitions for RGX software
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H_
+#define RGX_COMMON_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \
+ static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0, \
+ "Size of " #_a " is not properly aligned")
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \
+ static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0, \
+ "Offset of " #_a "." #_b " is not properly aligned")
+
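+/* Illustrative (hypothetical) usage, assuming a FW-shared structure
+ * EXAMPLE_SHARED_STRUCT with a member ui64Value:
+ *
+ *   RGX_FW_STRUCT_SIZE_ASSERT(EXAMPLE_SHARED_STRUCT);
+ *   RGX_FW_STRUCT_OFFSET_ASSERT(EXAMPLE_SHARED_STRUCT, ui64Value);
+ */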
+
+/* The following enum assumes that only one of the RGX_FEATURE_TLA and
+ * RGX_FEATURE_FASTRENDER_DM features is present. If this is no longer true,
+ * the build fails so the code can be fixed */
+#if defined (RGX_FEATURE_TLA) && defined (RGX_FEATURE_FASTRENDER_DM)
+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!"
+#endif
+
+/*! The master definition for data masters known to the firmware of RGX.
+ * When a new DM is added to this enum, a corresponding entry should be
+ * added to the RGX_HWPERF_DM enum list.
+ * The DM in a V1 HWPerf packet uses this definition. */
+typedef enum _RGXFWIF_DM_
+{
+ RGXFWIF_DM_GP = 0,
+
+ /* Either TDM or 2D DM is present. The above build time error is present to verify this */
+ RGXFWIF_DM_2D = 1, /* when RGX_FEATURE_TLA defined */
+ RGXFWIF_DM_TDM = 1, /* when RGX_FEATURE_FASTRENDER_DM defined */
+
+ RGXFWIF_DM_TA = 2,
+ RGXFWIF_DM_3D = 3,
+ RGXFWIF_DM_CDM = 4,
+
+ /* present on Ray cores only */
+ RGXFWIF_DM_RTU = 5,
+ RGXFWIF_DM_SHG = 6,
+
+ RGXFWIF_DM_LAST,
+
+ RGXFWIF_DM_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+} RGXFWIF_DM;
+
+typedef enum _RGX_KICK_TYPE_DM_
+{
+ RGX_KICK_TYPE_DM_GP = 1 << 0,
+ RGX_KICK_TYPE_DM_TDM_2D = 1 << 1,
+ RGX_KICK_TYPE_DM_TA = 1 << 2,
+ RGX_KICK_TYPE_DM_3D = 1 << 3,
+ RGX_KICK_TYPE_DM_CDM = 1 << 4,
+ RGX_KICK_TYPE_DM_RTU = 1 << 5,
+ RGX_KICK_TYPE_DM_SHG = 1 << 6,
+ RGX_KICK_TYPE_DM_TQ2D = 1 << 7,
+ RGX_KICK_TYPE_DM_TQ3D = 1 << 8,
+ RGX_KICK_TYPE_DM_LAST = 1 << 9
+} RGX_KICK_TYPE_DM;
+
+/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_DEFAULT_MAX (7)
+
+#if !defined(__KERNEL__)
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFWIF_DM_MAX_MTS 8
+#else
+#define RGXFWIF_DM_MAX_MTS 6
+#endif
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_MAX (7)
+#else
+/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM*/
+#define RGXFWIF_DM_MAX (5)
+#endif
+#define RGXFWIF_HWDM_MAX (RGXFWIF_DM_MAX)
+#else
+ #define RGXFWIF_DM_MIN_MTS_CNT (6)
+ #define RGXFWIF_RAY_TRACING_DM_MTS_CNT (2)
+ #define RGXFWIF_DM_MIN_CNT (5)
+ #define RGXFWIF_RAY_TRACING_DM_CNT (2)
+ #define RGXFWIF_DM_MAX (RGXFWIF_DM_MIN_CNT + RGXFWIF_RAY_TRACING_DM_CNT)
+#endif
+
+/* Min/Max number of HW DMs (all but GP) */
+#if defined(RGX_FEATURE_TLA)
+#define RGXFWIF_HWDM_MIN (1)
+#else
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFWIF_HWDM_MIN (1)
+#else
+#define RGXFWIF_HWDM_MIN (2)
+#endif
+#endif
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+#define RGXFW_ALIGN __attribute__ ((aligned (8)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN __declspec(align(8))
+#pragma warning (disable : 4324)
+#else
+#error "Align MACROS need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN RGXFW_ALIGN
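+
+/* Illustrative (hypothetical) usage; the structure and member names below
+ * are invented for the example:
+ *
+ *   typedef struct
+ *   {
+ *       IMG_UINT64 RGXFW_ALIGN ui64Timestamp;
+ *       IMG_UINT32             ui32Flags;
+ *   } UNCACHED_ALIGN EXAMPLE_FW_STRUCT;
+ */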
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_compat_bvnc.c
+@Title BVNC compatibility check utilities
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used for packing BNC and V.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_compat_bvnc.h"
+#if defined(RGX_FIRMWARE)
+#include "rgxfw_utils.h"
+#elif !defined(RGX_BUILD_BINARY)
+#include "pvr_debug.h"
+#endif
+
+#if defined(RGX_FIRMWARE)
+#define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY)
+#define PVR_COMPAT_ASSERT PVR_ASSERT
+#else
+#include <assert.h>
+#define PVR_COMPAT_ASSERT assert
+#endif
+
+/**************************************************************************//**
+ * Local implementation of the C library strlen function.
+ *****************************************************************************/
+static INLINE IMG_UINT32 OSStringLength(const IMG_CHAR* pszInput)
+{
+ const IMG_CHAR* pszTemp = pszInput;
+
+ while (*pszTemp)
+ pszTemp++;
+
+ return (pszTemp - pszInput);
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC
+ *****************************************************************************/
+static INLINE IMG_UINT64 rgx_bnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32N,
+ IMG_UINT32 ui32C)
+{
+ /*
+ * Test for input B, N and C exceeding max bit width.
+ */
+ PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0);
+ PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0);
+ PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0);
+
+ return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
+ ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
+ ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V to be used by compatibility check.
+ * BNC is packed into 48 bit format.
+ * If the array pointed to by pszV is a string that is shorter than
+ * ui32OutVMaxLen characters, null characters are appended to the copy in the
+ * array pointed to by pszOutV, until 'ui32OutVMaxLen' characters in all have
+ * been written.
+ *
+ * @param pui64OutBNC Output containing packed BNC.
+ * @param pszOutV Output containing version string.
+ * @param ui32OutVMaxLen Max characters that can be written to
+ pszOutV (excluding terminating null character)
+ * @param ui32B Input 'B' value
+ * @param pszV Input 'V' string
+ * @param ui32N Input 'N' value
+ * @param ui32C Input 'C' value
+ * @return None
+ *****************************************************************************/
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+ *pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+ if (!pszOutV)
+ return;
+
+ if (pszV)
+ {
+ /*
+ * Assert can fail for two reasons
+ * 1. Caller is passing invalid 'V' string or
+ * 2. Dest buffer does not have enough memory allocated for max 'V' size.
+ */
+ PVR_COMPAT_ASSERT(OSStringLength(pszV) <= ui32OutVMaxLen);
+
+
+ for (; ui32OutVMaxLen > 0 && *pszV != '\0'; --ui32OutVMaxLen)
+ {
+ /* When copying the V, omit any non-numeric characters, as these
+ * would cause the compatibility check against the V read from HW to fail
+ */
+ if (*pszV && (*pszV >= '0') && (*pszV <='9'))
+ {
+ *pszOutV++ = *pszV++;
+ }
+ else
+ {
+ pszV++;
+ }
+ }
+ }
+
+ do
+ {
+ *pszOutV++ = '\0';
+ } while (ui32OutVMaxLen-- > 0);
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V to be used by compatibility check.
+ * Input B, N and C are packed into 48-bit format.
+ * Input V is converted into a string. If the number of characters required
+ * to represent the 16-bit-wide version number is less than ui32OutVMaxLen,
+ * then null characters are appended to pszOutV, until ui32OutVMaxLen
+ * characters in all have been written.
+ *
+ * @param pui64OutBNC Output containing packed BNC.
+ * @param pszOutV Output containing version string.
+ * @param ui32OutVMaxLen Max characters that can be written to
+ pszOutV (excluding terminating null character)
+ * @param ui32B Input 'B' value (16 bit wide)
+ * @param ui32V Input 'V' value (16 bit wide)
+ * @param ui32N Input 'N' value (16 bit wide)
+ * @param ui32C Input 'C' value (16 bit wide)
+ * @return None
+ *****************************************************************************/
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+ /*
+ * Allocate space for max digits required to represent 16 bit wide version
+ * number (including NULL terminating character).
+ */
+ IMG_CHAR aszBuf[6];
+ IMG_CHAR *pszPointer = aszBuf;
+
+ *pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+ if (!pszOutV)
+ return;
+
+ /*
+ * Function only supports 16 bits wide version number.
+ */
+ PVR_COMPAT_ASSERT((ui32V & ~0xFFFF) == 0);
+
+ if (ui32V > 9999)
+ pszPointer+=5;
+ else if (ui32V > 999)
+ pszPointer+=4;
+ else if (ui32V > 99)
+ pszPointer+=3;
+ else if (ui32V > 9)
+ pszPointer+=2;
+ else
+ pszPointer+=1;
+
+ *pszPointer-- = '\0';
+ *pszPointer = '0';
+
+ while (ui32V > 0)
+ {
+ *pszPointer-- = (ui32V % 10) + '0';
+ ui32V /= 10;
+ }
+
+ for (pszPointer = aszBuf; ui32OutVMaxLen > 0 && *pszPointer != '\0'; --ui32OutVMaxLen)
+ *pszOutV++ = *pszPointer++;
+
+ /*
+ * Append NULL characters.
+ */
+ do
+ {
+ *pszOutV++ = '\0';
+ } while (ui32OutVMaxLen-- > 0);
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Functions for BVNC manipulating
+
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used for packing BNC and V for
+ compatibility checks.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_COMPAT_BVNC_H__)
+#define __RGX_COMPAT_BVNC_H__
+
+#include "img_types.h"
+
+/* 64bit endian converting macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N) \
+ ((((N) >> 56) & 0xff) \
+ | (((N) >> 40) & 0xff00) \
+ | (((N) >> 24) & 0xff0000) \
+ | (((N) >> 8) & 0xff000000) \
+ | ((N) << 56) \
+ | (((N) & 0xff00) << 40) \
+ | (((N) & 0xff0000) << 24) \
+ | (((N) & 0xff000000) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N) \
+ ((((N) >> 24) & 0xff) \
+ | (((N) >> 8) & 0xff00) \
+ | ((N) << 24) \
+ | (((N) & 0xff00) << 8))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
+
+/******************************************************************************
+ * RGX Version packed into 48-bit (BNC) and string (V) to be used by Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_SHIFT_B 32
+#define RGX_BVNC_PACK_SHIFT_N 16
+#define RGX_BVNC_PACK_SHIFT_C 0
+
+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0x0000FFFF00000000))
+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((BVNC).aszV)
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
+
+#if !defined(RGX_SKIP_BVNC_CHECK)
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do { \
+ (lenmax) = IMG_FALSE; \
+ (bnc) = IMG_FALSE; \
+ (v) = IMG_FALSE; \
+ (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \
+ if (version) \
+ { \
+ (lenmax) = ((L).ui32VLenMax == (R).ui32VLenMax); \
+ } \
+ if (lenmax) \
+ { \
+ (bnc) = ((L).ui64BNC == (R).ui64BNC); \
+ } \
+ if (bnc) \
+ { \
+ (L).aszV[(L).ui32VLenMax] = '\0'; \
+ (R).aszV[(R).ui32VLenMax] = '\0'; \
+ (v) = (OSStringCompare((L).aszV, (R).aszV)==0); \
+ } \
+ (all) = (version) && (lenmax) && (bnc) && (v); \
+ } while (0)
+#else
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do { \
+ (all) = IMG_TRUE; \
+ (version) = IMG_TRUE; \
+ (lenmax) = IMG_TRUE; \
+ (bnc) = IMG_TRUE; \
+ (v) = IMG_TRUE; \
+ } while (0)
+
+#endif
+
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
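+
+/* Illustrative (hypothetical) call: the B/V/N/C values below are arbitrary.
+ * It packs B=4, N=6, C=62 into the 48-bit BNC and copies the numeric
+ * characters of V into the output string:
+ *
+ *   IMG_UINT64 ui64BNC;
+ *   IMG_CHAR aszOutV[8];
+ *   IMG_CHAR acInV[] = "2";
+ *
+ *   rgx_bvnc_packed(&ui64BNC, aszOutV, sizeof(aszOutV) - 1, 4, acInV, 6, 62);
+ */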
+
+#endif /* __RGX_COMPAT_BVNC_H__ */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_firmware_processor.h
+@Title
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform RGX
+@Description Generic include file for firmware processors (META and MIPS)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if !defined(RGX_FIRMWARE_PROCESSOR_H)
+#define RGX_FIRMWARE_PROCESSOR_H
+
+#include "km/rgxdefs_km.h"
+
+#include "rgx_meta.h"
+#include "rgx_mips.h"
+
+/* Processor-independent definitions, common to all firmware processors, belong here */
+typedef enum
+{
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_POLLS = 3,
+ FW_PERF_CONF_CUSTOM_TIMER = 4,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+#if !defined(__KERNEL__)
+ #if defined(RGX_FEATURE_MIPS)
+
+ #define FW_CORE_ID_VALUE RGXMIPSFW_CORE_ID_VALUE
+ #define RGXFW_PROCESSOR RGXFW_PROCESSOR_MIPS
+
+ /* Firmware to host interrupts defines */
+ #define RGXFW_CR_IRQ_STATUS RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+ #define RGXFW_CR_IRQ_STATUS_EVENT_EN RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN
+ #define RGXFW_CR_IRQ_CLEAR RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+ #define RGXFW_CR_IRQ_CLEAR_MASK RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN
+
+ #else
+
+ #define RGXFW_PROCESSOR RGXFW_PROCESSOR_META
+
+ /* Firmware to host interrupts defines */
+ #define RGXFW_CR_IRQ_STATUS RGX_CR_META_SP_MSLVIRQSTATUS
+ #define RGXFW_CR_IRQ_STATUS_EVENT_EN RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN
+ #define RGXFW_CR_IRQ_CLEAR RGX_CR_META_SP_MSLVIRQSTATUS
+ #define RGXFW_CR_IRQ_CLEAR_MASK (RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK & \
+ RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL)
+
+ #endif
+#endif
+
+#endif /* RGX_FIRMWARE_PROCESSOR_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_fwif.h
+@Title RGX firmware interface structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by srvinit and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_H__)
+#define __RGX_FWIF_H__
+
+#include "rgx_firmware_processor.h"
+#include "rgx_fwif_shared.h"
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE 0x00000000
+#define RGXFWIF_LOG_TYPE_TRACE 0x00000001
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002
+#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008
+#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010
+#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020
+#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040
+#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080
+#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100
+#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200
+#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400
+#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800
+#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000
+#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000
+#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80003FFE
+#define RGXFWIF_LOG_TYPE_MASK 0x80003FFF
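+
+/*
+ * Illustrative sketch (not part of the interface): a caller would typically
+ * OR RGXFWIF_LOG_TYPE_TRACE with the group bits it wants enabled, for example
+ *
+ *   IMG_UINT32 ui32LogType = RGXFWIF_LOG_TYPE_TRACE |
+ *                            RGXFWIF_LOG_TYPE_GROUP_MAIN |
+ *                            RGXFWIF_LOG_TYPE_GROUP_POW;
+ *
+ * and keep the result within RGXFWIF_LOG_TYPE_MASK. The variable name is
+ * hypothetical; only the macros above are defined by this header.
+ */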
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+ const IMG_CHAR* pszLogGroupName;
+ IMG_UINT32 ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+ table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \
+ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \
+ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \
+ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :(""))
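+
+/*
+ * The two macros above are meant to be used together: the format specifier
+ * supplies one "%s" per group and the list macro supplies the matching
+ * arguments. A sketch of such a print statement (the logging call itself is
+ * a placeholder):
+ *
+ *   printk("Enabled log groups: "
+ *          RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "\n",
+ *          RGXFWIF_LOG_ENABLED_GROUPS_LIST(ui32LogType));
+ */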
+
+
+/*! Logging function */
+typedef void (*PFN_RGXFW_LOG) (const IMG_CHAR* pszFmt, ...);
+
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+#define RGXFW_SIG_BUFFER_SIZE_MIN (1024)
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size limits of the Firmware L1 HWPERF buffer, in kilobytes (the default of
+ * 2048KB corresponds to 2MB). Accessed by the Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT (2048U)
+#define RGXFW_HWPERF_L1_SIZE_MAX (12288U)
+
+/* This padding value must always be large enough to hold the biggest
+ * variable sized packet. */
+#define RGXFW_HWPERF_L1_PADDING_DEFAULT (RGX_HWPERF_MAX_PACKET_SIZE)
+
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Number of elements on each line when dumping the trace buffer */
+#define RGXFW_TRACE_BUFFER_LINESIZE (30)
+
+/*! Total size of the RGXFWIF_TRACEBUF trace buffer, in dwords (must be a multiple of RGXFW_TRACE_BUFFER_LINESIZE) */
+#define RGXFW_TRACE_BUFFER_SIZE (400*RGXFW_TRACE_BUFFER_LINESIZE)
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2
+#else
+#define RGXFW_THREAD_NUM 1
+#endif
+
+#define RGXFW_POLL_TYPE_SET 0x80000000
+
+typedef struct _RGXFWIF_ASSERTBUF_
+{
+ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+ IMG_UINT32 ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_ASSERTBUF;
+
+typedef struct _RGXFWIF_TRACEBUF_SPACE_
+{
+ IMG_UINT32 ui32TracePointer;
+
+#if defined (RGX_FIRMWARE)
+ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /* To be used by firmware for writing into trace buffer */
+#else
+ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer;
+#endif
+ IMG_PUINT32 pui32TraceBuffer; /* To be used by host when reading from trace buffer */
+
+ RGXFWIF_ASSERTBUF sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+#define RGXFWIF_POW_STATES \
+ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready for full power down) */ \
+ X(RGXFWIF_POW_ON) /* running HW commands */ \
+ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \
+ X(RGXFWIF_POW_IDLE) /* idle, waiting for the host handshake */
+
+typedef enum _RGXFWIF_POW_STATE_
+{
+#define X(NAME) NAME,
+ RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
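+
+/*
+ * The power states are kept in an X-macro list so that tables derived from
+ * them cannot drift out of step with the enum. As an illustration (the array
+ * name is hypothetical), a matching name table can be generated from the
+ * same list:
+ *
+ *   #define X(NAME) #NAME,
+ *   static const char *const apszPowStateNames[] = { RGXFWIF_POW_STATES };
+ *   #undef X
+ */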
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK (0x1 << 0) /*!< Tells if the HW state is ok or locked up */
+#define RGXFWIF_HWR_ANALYSIS_DONE (0x1 << 2) /*!< Tells if the analysis of a GPU lockup has already been performed */
+#define RGXFWIF_HWR_GENERAL_LOCKUP (0x1 << 3) /*!< Tells if a DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK (0x1 << 4) /*!< Tells if at least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING (0x1 << 5) /*!< Tells if at least one DM is close to lockup */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING (0x00) /*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR (0x1 << 0) /*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP (0x1 << 2) /*!< DM needs to skip to the next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (0x1 << 3) /*!< DM needs partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (0x1 << 4) /*!< DM needs to increment the Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (0x1 << 5) /*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (0x1 << 6) /*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (0x1 << 7) /*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (0x1 << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */
+
+/* Per-OSid States */
+#define RGXFW_OS_STATE_ACTIVE_OS (1 << 0) /*!< Non active operating systems should not be served by the FW */
+#define RGXFW_OS_STATE_FREELIST_OK (1 << 1) /*!< Pending freelist reconstruction from that particular OS */
+#define RGXFW_OS_STATE_OFFLOADING (1 << 2) /*!< Transient state while all the OS resources in the FW are cleaned up */
+#define RGXFW_OS_STATE_GROW_REQUEST_PENDING (1 << 3) /*!< Signifies whether a request to grow a freelist is pending completion */
+
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+typedef struct _RGXFWIF_TRACEBUF_
+{
+ IMG_UINT32 ui32LogType;
+ volatile RGXFWIF_POW_STATE ePowState;
+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM];
+
+ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 ui32HwrCounter;
+
+ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM];
+
+ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags;
+ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX];
+
+ volatile IMG_UINT32 ui32HWPerfRIdx;
+ volatile IMG_UINT32 ui32HWPerfWIdx;
+ volatile IMG_UINT32 ui32HWPerfWrapCount;
+ IMG_UINT32 ui32HWPerfSize; /* Constant after setup, needed in FW */
+ IMG_UINT32 ui32HWPerfDropCount; /* The number of times the FW drops a packet due to buffer full */
+
+ /* These next three items are only valid at runtime when the FW is built
+ * with RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined
+ * in rgxfw_hwperf.c */
+ IMG_UINT32 ui32HWPerfUt; /* Buffer utilisation, high watermark of bytes in use */
+ IMG_UINT32 ui32FirstDropOrdinal;/* The ordinal of the first packet the FW dropped */
+ IMG_UINT32 ui32LastDropOrdinal; /* The ordinal of the last packet the FW dropped */
+
+ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads */
+ IMG_UINT32 ui32KCCBCmdsExecuted;
+ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime;
+ IMG_UINT32 ui32PowMonEnergy; /* Non-volatile power monitor energy count */
+
+#define RGXFWIF_MAX_PCX 16
+ IMG_UINT32 ui32T1PCX[RGXFWIF_MAX_PCX];
+ IMG_UINT32 ui32T1PCXWOff;
+
+ IMG_UINT32 ui32OSStateFlags[RGXFW_NUM_OS]; /*!< State flags for each Operating System */
+
+ IMG_UINT32 ui32MMUFlushCounter;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+#define RGXFWIF_GPU_STATS_MAX_VALUE_OF_STATE 10000
+
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW (0U)
+#define RGXFWIF_GPU_UTIL_STATE_IDLE (1U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH (2U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (3U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM (4U)
+
+#define RGXFWIF_GPU_UTIL_TIME_MASK IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)
+#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the Host.
+ * In some cases we perform subtractions between FW-approximated timestamps and
+ * real OS timestamps, so we need protection against negative results, for
+ * instance when the FW timestamp is slightly ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+ ((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
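+
+/*
+ * Illustrative decode of a GPU utilisation word with the macros above
+ * (ui64Word and ui64Now are hypothetical values read by the caller):
+ *
+ *   IMG_UINT64 ui64Time   = RGXFWIF_GPU_UTIL_GET_TIME(ui64Word);
+ *   IMG_UINT64 ui64State  = RGXFWIF_GPU_UTIL_GET_STATE(ui64Word);
+ *   IMG_UINT64 ui64Period = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64Now, ui64Time);
+ *
+ * RGXFWIF_GPU_UTIL_MAKE_WORD() packs a timestamp and a state back into one
+ * 64-bit value, with the state held in the two least significant bits.
+ */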
+
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid races between the FW reading an entry
+ * and the Host updating it. With 2 entries, in the worst case the FW reads stale
+ * data, which is still acceptable while the Host is updating the timer
+ * correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1)) == 0,
+ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+typedef struct _RGXFWIF_GPU_UTIL_FWCB_
+{
+ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+ IMG_UINT32 ui32TimeCorrSeqCount;
+
+ /* Last GPU state + OS time of the last state update */
+ IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+ /* Counters for the amount of time the GPU was active/idle/blocked */
+ IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+typedef enum _RGX_HWRTYPE_
+{
+ RGX_HWRTYPE_UNKNOWNFAILURE = 0,
+ RGX_HWRTYPE_OVERRUN = 1,
+ RGX_HWRTYPE_POLLFAILURE = 2,
+ RGX_HWRTYPE_BIF0FAULT = 3,
+ RGX_HWRTYPE_BIF1FAULT = 4,
+ RGX_HWRTYPE_TEXASBIF0FAULT = 5,
+ RGX_HWRTYPE_DPXMMUFAULT = 6,
+ RGX_HWRTYPE_MMUFAULT = 7,
+ RGX_HWRTYPE_MMUMETAFAULT = 8,
+} RGX_HWRTYPE;
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1 )
+
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT || \
+ eHWRType == RGX_HWRTYPE_BIF1FAULT || \
+ eHWRType == RGX_HWRTYPE_TEXASBIF0FAULT || \
+ eHWRType == RGX_HWRTYPE_MMUFAULT || \
+ eHWRType == RGX_HWRTYPE_MMUMETAFAULT) ? 1 : 0 )
+
+typedef struct _RGX_BIFINFO_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_BIFINFO;
+
+typedef struct _RGX_MMUINFO_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64MMUStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_MMUINFO;
+
+typedef struct _RGX_POLLINFO_
+{
+ IMG_UINT32 ui32ThreadNum;
+ IMG_UINT32 ui32CrPollAddr;
+ IMG_UINT32 ui32CrPollMask;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct _RGX_HWRINFO_
+{
+ union
+ {
+ RGX_BIFINFO sBIFInfo;
+ RGX_MMUINFO sMMUInfo;
+ RGX_POLLINFO sPollInfo;
+ } uHWRData;
+
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer;
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer;
+ IMG_UINT32 ui32FrameNum;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ActiveHWRTData;
+ IMG_UINT32 ui32HWRNumber;
+ IMG_UINT32 ui32EventStatus;
+ IMG_UINT32 ui32HWRRecoveryFlags;
+ RGX_HWRTYPE eHWRType;
+ RGXFWIF_DM eDM;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady;
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8 /* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8 /* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1) /* Index of the last log in the HWR log buffer */
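+
+/*
+ * Sketch of the wrap behaviour these constants imply, under the assumption
+ * that the write index jumps back over the "last" half of the buffer only
+ * once the "first" entries have been filled:
+ *
+ *   if (ui32WriteIndex > RGXFWIF_HWINFO_LAST_INDEX)
+ *       ui32WriteIndex = RGXFWIF_HWINFO_MAX_FIRST;
+ *
+ * i.e. entries 0..RGXFWIF_HWINFO_MAX_FIRST-1 are never overwritten, while the
+ * remaining entries form a circular buffer of the most recent HWR events.
+ */
+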
+typedef struct _RGXFWIF_HWRINFOBUF_
+{
+ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX];
+
+ IMG_UINT32 ui32FirstCrPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32FirstCrPollMask[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32WriteIndex;
+ IMG_UINT32 ui32DDReqCount;
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+
+
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (1)
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)
+
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+#define RGXFWIF_INICFG_CTXSWITCH_TA_EN (0x1 << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_3D_EN (0x1 << 1)
+#define RGXFWIF_INICFG_CTXSWITCH_CDM_EN (0x1 << 2)
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (0x1 << 3)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (0x1 << 4)
+#define RGXFWIF_INICFG_RSVD (0x1 << 5)
+#define RGXFWIF_INICFG_POW_RASCALDUST (0x1 << 6)
+#define RGXFWIF_INICFG_HWPERF_EN (0x1 << 7)
+#define RGXFWIF_INICFG_HWR_EN (0x1 << 8)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN (0x1 << 9)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (0x1 << 10)
+#define RGXFWIF_INICFG_POLL_COUNTERS_EN (0x1 << 11)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK (0xFFFFCFFFU)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT (12)
+#define RGXFWIF_INICFG_SHG_BYPASS_EN (0x1 << 14)
+#define RGXFWIF_INICFG_RTU_BYPASS_EN (0x1 << 15)
+#define RGXFWIF_INICFG_REGCONFIG_EN (0x1 << 16)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (0x1 << 17)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (0x1 << 18)
+#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN (0x1 << 19)
+#define RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN (0x1 << 20)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (0x1 << 21)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (22)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (0x7 << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_METAT1_SHIFT (25)
+#define RGXFWIF_INICFG_METAT1_MAIN (RGX_META_T1_MAIN << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_DUMMY (RGX_META_T1_DUMMY << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_ENABLED (RGXFWIF_INICFG_METAT1_MAIN | RGXFWIF_INICFG_METAT1_DUMMY)
+#define RGXFWIF_INICFG_METAT1_MASK (RGXFWIF_INICFG_METAT1_ENABLED >> RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (0x1 << 27)
+#define RGXFWIF_INICFG_WORKEST_V1 (0x1 << 28)
+#define RGXFWIF_INICFG_WORKEST_V2 (0x1 << 29)
+#define RGXFWIF_INICFG_PDVFS_V1 (0x1 << 30)
+#define RGXFWIF_INICFG_PDVFS_V2 (0x1 << 31)
+#define RGXFWIF_INICFG_ALL (0xFFFFFFDFU)
+
+#define RGXFWIF_SRVCFG_DISABLE_PDP_EN (0x1 << 31)
+#define RGXFWIF_SRVCFG_ALL (0x80000000U)
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF (0x1 << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT (0x1 << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (0x1 << 1)
+
+#define RGXFWIF_INICFG_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_CTXSWITCH_TA_EN | \
+ RGXFWIF_INICFG_CTXSWITCH_3D_EN | \
+ RGXFWIF_INICFG_CTXSWITCH_CDM_EN)
+
+#define RGXFWIF_INICFG_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_DM_ALL | \
+ RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
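+
+/*
+ * Illustrative composition of an init config word from the flags above (the
+ * combination shown is arbitrary, not a recommended default):
+ *
+ *   IMG_UINT32 ui32ConfigFlags = RGXFWIF_INICFG_CTXSWITCH_DM_ALL |
+ *                                RGXFWIF_INICFG_HWR_EN |
+ *                                RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM;
+ *
+ * Multi-bit fields such as the context switch profile or the VDM context
+ * store mode should be cleared with their MASK/CLRMSK before a new value is
+ * inserted.
+ */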
+
+typedef enum
+{
+ RGX_ACTIVEPM_FORCE_OFF = 0,
+ RGX_ACTIVEPM_FORCE_ON = 1,
+ RGX_ACTIVEPM_DEFAULT = 2
+} RGX_ACTIVEPM_CONF;
+
+typedef enum
+{
+ RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+ RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+ RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+typedef enum
+{
+ RGX_META_T1_OFF = 0x0, /*!< No thread 1 running (unless 2nd thread is used for HWPerf) */
+ RGX_META_T1_MAIN = 0x1, /*!< Run the main thread 0 code on thread 1 (and vice versa if 2nd thread is used for HWPerf) */
+ RGX_META_T1_DUMMY = 0x2 /*!< Run dummy test code on thread 1 */
+} RGX_META_T1_CONF;
+
+/*!
+ ******************************************************************************
+ * Querying DM state
+ *****************************************************************************/
+
+typedef enum _RGXFWIF_DM_STATE_
+{
+ RGXFWIF_DM_STATE_NORMAL = 0,
+ RGXFWIF_DM_STATE_LOCKEDUP = 1
+} RGXFWIF_DM_STATE;
+
+typedef struct
+{
+ IMG_UINT16 ui16RegNum; /*!< Register number */
+ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */
+ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */
+ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+#endif /* __RGX_FWIF_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX fw interface alignment checks
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Checks to avoid misalignment in RGX fw data structures
+ shared with the host
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_ALIGNCHECKS_H__)
+#define __RGX_FWIF_ALIGNCHECKS_H__
+
+/* for the offsetof macro */
+#include <stddef.h>
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128
+
+#define RGXFW_ALIGN_CHECKS_INIT0 \
+ sizeof(RGXFWIF_TRACEBUF), \
+ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \
+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmLockedUpCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmOverranCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmRecoveredCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmFalseDetectCount), \
+ \
+ /* RGXFWIF_CMDTA checks */ \
+ sizeof(RGXFWIF_CMDTA), \
+ offsetof(RGXFWIF_CMDTA, sTARegs), \
+ \
+ /* RGXFWIF_CMD3D checks */ \
+ sizeof(RGXFWIF_CMD3D), \
+ offsetof(RGXFWIF_CMD3D, s3DRegs), \
+ \
+ /* RGXFWIF_CMDTRANSFER checks */ \
+ sizeof(RGXFWIF_CMDTRANSFER), \
+ offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \
+ \
+ \
+ /* RGXFWIF_CMD_COMPUTE checks */ \
+ sizeof(RGXFWIF_CMD_COMPUTE), \
+ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \
+ \
+ sizeof(RGXFWIF_FREELIST), \
+ offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr),\
+ offsetof(RGXFWIF_FREELIST, ui32MaxPages),\
+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\
+ offsetof(RGXFWIF_FREELIST, ui32HWRCounter),\
+ \
+ sizeof(RGXFWIF_RENDER_TARGET),\
+ offsetof(RGXFWIF_RENDER_TARGET, psVHeapTableDevVAddr), \
+ \
+ sizeof(RGXFWIF_HWRTDATA), \
+ offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \
+ offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\
+ offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \
+ offsetof(RGXFWIF_HWRTDATA, psParentRenderTarget), \
+ offsetof(RGXFWIF_HWRTDATA, eState), \
+ offsetof(RGXFWIF_HWRTDATA, ui32NumPartialRenders), \
+ \
+ sizeof(RGXFWIF_HWPERF_CTL_BLK), \
+ offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \
+ \
+ sizeof(RGXFWIF_REGISTER_GUESTOS_OFFSETS), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, ui32OSid), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCBCtl), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCB), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCBCtl), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCB), \
+\
+ sizeof(RGXFWIF_HWPERF_CTL), \
+ offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
+
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFW_ALIGN_CHECKS_INIT1 \
+ RGXFW_ALIGN_CHECKS_INIT0, \
+ sizeof(RGXFWIF_RPM_FREELIST), \
+ offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32MaxPages), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32GrowPages)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+
+#if defined(RGX_FEATURE_TLA)
+#define RGXFW_ALIGN_CHECKS_INIT2 \
+ RGXFW_ALIGN_CHECKS_INIT1, \
+ /* RGXFWIF_CMD2D checks */ \
+ sizeof(RGXFWIF_CMD2D), \
+ offsetof(RGXFWIF_CMD2D, s2DRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT2 RGXFW_ALIGN_CHECKS_INIT1
+#endif /* RGX_FEATURE_TLA */
+
+
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFW_ALIGN_CHECKS_INIT \
+ RGXFW_ALIGN_CHECKS_INIT2, \
+ /* RGXFWIF_CMDTDM checks */ \
+ sizeof(RGXFWIF_CMDTDM), \
+ offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT2
+#endif /* RGX_FEATURE_FASTRENDER_DM */
+
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM \
+ sizeof(RGXFWIF_INIT), \
+ offsetof(RGXFWIF_INIT, sFaultPhysAddr), \
+ offsetof(RGXFWIF_INIT, sPDSExecBase), \
+ offsetof(RGXFWIF_INIT, sUSCExecBase), \
+ offsetof(RGXFWIF_INIT, psKernelCCBCtl), \
+ offsetof(RGXFWIF_INIT, psKernelCCB), \
+ offsetof(RGXFWIF_INIT, psFirmwareCCBCtl), \
+ offsetof(RGXFWIF_INIT, psFirmwareCCB), \
+ offsetof(RGXFWIF_INIT, asSigBufCtl), \
+ offsetof(RGXFWIF_INIT, sTraceBufCtl), \
+ offsetof(RGXFWIF_INIT, sRGXCompChecks), \
+ \
+ /* RGXFWIF_FWRENDERCONTEXT checks */ \
+ sizeof(RGXFWIF_FWRENDERCONTEXT), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \
+ \
+ sizeof(RGXFWIF_FWCOMMONCONTEXT), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, ui64MCUFenceAddr)
+
+#endif /* __RGX_FWIF_ALIGNCHECKS_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_fwif_hwperf.h
+@Title RGX HWPerf support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared header between RGX firmware and Init process
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf_km.h"
+#include "rgxdefs_km.h"
+
+
+/*****************************************************************************/
+
+/* Structure to hold a block's parameters for passing between the BG context
+ * and the IRQ context when applying a configuration request. */
+typedef struct _RGXFWIF_HWPERF_CTL_BLK_
+{
+ IMG_BOOL bValid;
+ IMG_BOOL bEnabled;
+ IMG_UINT32 eBlockID;
+ IMG_UINT32 uiCounterMask;
+ IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_COUNTERS_MAX];
+} RGXFWIF_HWPERF_CTL_BLK;
+
+/* Structure used to hold the configuration of the non-mux counters blocks */
+typedef struct _RGXFW_HWPERF_SELECT_
+{
+ IMG_UINT32 ui32NumSelectedCounters;
+ IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
+} RGXFW_HWPERF_SELECT;
+
+/* Structure to hold the whole configuration request details for all blocks
+ * The block masks and counts are used to optimise reading of this data. */
+typedef struct _RGXFWIF_HWPERF_CTL_
+{
+ IMG_BOOL bResetOrdinal;
+
+ IMG_UINT32 ui32SelectedCountersBlockMask;
+ RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
+
+ IMG_UINT32 ui32EnabledBlksCount;
+ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_DEFINED_BLKS];
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+
+/* NOTE: The switch statement in this function must be kept in step with the
+ * RGX_HWPERF_CNTBLK_ID enumeration defined in rgx_hwperf_km.h; ASSERTs may
+ * fire otherwise.
+ * The function performs a hash-style lookup, mapping a block ID to that
+ * block's configuration entry in the global store.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_CTL_BLK* rgxfw_hwperf_get_block_ctl(
+ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+ IMG_INT32 i32Idx = -1;
+
+ /* Hash the block ID into a control configuration array index */
+ switch(eBlockID)
+ {
+ case RGX_CNTBLK_ID_TA:
+ case RGX_CNTBLK_ID_RASTER:
+ case RGX_CNTBLK_ID_HUB:
+ case RGX_CNTBLK_ID_TORNADO:
+ case RGX_CNTBLK_ID_JONES:
+ case RGX_CNTBLK_ID_BF:
+ case RGX_CNTBLK_ID_BT:
+ case RGX_CNTBLK_ID_RT:
+ case RGX_CNTBLK_ID_SH:
+ {
+ i32Idx = eBlockID;
+ break;
+ }
+ case RGX_CNTBLK_ID_TPU_MCU0:
+ case RGX_CNTBLK_ID_TPU_MCU1:
+ case RGX_CNTBLK_ID_TPU_MCU2:
+ case RGX_CNTBLK_ID_TPU_MCU3:
+ case RGX_CNTBLK_ID_TPU_MCU4:
+ case RGX_CNTBLK_ID_TPU_MCU5:
+ case RGX_CNTBLK_ID_TPU_MCU6:
+ case RGX_CNTBLK_ID_TPU_MCU7:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_USC0:
+ case RGX_CNTBLK_ID_USC1:
+ case RGX_CNTBLK_ID_USC2:
+ case RGX_CNTBLK_ID_USC3:
+ case RGX_CNTBLK_ID_USC4:
+ case RGX_CNTBLK_ID_USC5:
+ case RGX_CNTBLK_ID_USC6:
+ case RGX_CNTBLK_ID_USC7:
+ case RGX_CNTBLK_ID_USC8:
+ case RGX_CNTBLK_ID_USC9:
+ case RGX_CNTBLK_ID_USC10:
+ case RGX_CNTBLK_ID_USC11:
+ case RGX_CNTBLK_ID_USC12:
+ case RGX_CNTBLK_ID_USC13:
+ case RGX_CNTBLK_ID_USC14:
+ case RGX_CNTBLK_ID_USC15:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_TEXAS0:
+ case RGX_CNTBLK_ID_TEXAS1:
+ case RGX_CNTBLK_ID_TEXAS2:
+ case RGX_CNTBLK_ID_TEXAS3:
+ case RGX_CNTBLK_ID_TEXAS4:
+ case RGX_CNTBLK_ID_TEXAS5:
+ case RGX_CNTBLK_ID_TEXAS6:
+ case RGX_CNTBLK_ID_TEXAS7:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_RASTER0:
+ case RGX_CNTBLK_ID_RASTER1:
+ case RGX_CNTBLK_ID_RASTER2:
+ case RGX_CNTBLK_ID_RASTER3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_BLACKPEARL0:
+ case RGX_CNTBLK_ID_BLACKPEARL1:
+ case RGX_CNTBLK_ID_BLACKPEARL2:
+ case RGX_CNTBLK_ID_BLACKPEARL3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_PBE0:
+ case RGX_CNTBLK_ID_PBE1:
+ case RGX_CNTBLK_ID_PBE2:
+ case RGX_CNTBLK_ID_PBE3:
+ case RGX_CNTBLK_ID_PBE4:
+ case RGX_CNTBLK_ID_PBE5:
+ case RGX_CNTBLK_ID_PBE6:
+ case RGX_CNTBLK_ID_PBE7:
+ case RGX_CNTBLK_ID_PBE8:
+ case RGX_CNTBLK_ID_PBE9:
+ case RGX_CNTBLK_ID_PBE10:
+ case RGX_CNTBLK_ID_PBE11:
+ case RGX_CNTBLK_ID_PBE12:
+ case RGX_CNTBLK_ID_PBE13:
+ case RGX_CNTBLK_ID_PBE14:
+ case RGX_CNTBLK_ID_PBE15:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_BX_TU0:
+ case RGX_CNTBLK_ID_BX_TU1:
+ case RGX_CNTBLK_ID_BX_TU2:
+ case RGX_CNTBLK_ID_BX_TU3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ default:
+ {
+ return NULL;
+ }
+ }
+ if ((i32Idx < 0) || (i32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS))
+ {
+ return NULL;
+ }
+ return &psHWPerfInitData->sBlkCfg[i32Idx];
+}
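+
+/*
+ * Illustrative call of the lookup above (everything except the function and
+ * the types is hypothetical):
+ *
+ *   RGXFWIF_HWPERF_CTL_BLK *psBlk =
+ *       rgxfw_hwperf_get_block_ctl(eBlockID, psHWPerfInitData);
+ *   if (psBlk != NULL)
+ *       psBlk->bEnabled = IMG_TRUE;
+ *
+ * A NULL return means the block ID is unknown or out of range.
+ */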
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures used by pvrsrvkm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by pvrsrvkm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_KM_H__)
+#define __RGX_FWIF_KM_H__
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "rgx_firmware_processor.h"
+
+#if !defined(__KERNEL__)
+/* The following defines give the offsets of the KCCB, KCCBCtl, FWCCB and FWCCBCtl
+ * for the various guests in a virtualisation environment. It is assumed that every
+ * guest is built the same way, so all of their offsets match. If the host-level
+ * code changes and the offsets move, the defines here must be updated to match.
+ */
+
+#if defined(RGX_FEATURE_META)
+#define RGXFWIF_GUEST_OFFSET_KCCB (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+ RGXFW_SEGMMU_DATA_META_CACHED | \
+ RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+ 0x2000U)
+#define RGXFWIF_GUEST_OFFSET_KCCBCTL (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+ RGXFW_SEGMMU_DATA_META_UNCACHED | \
+ RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+ 0x0280U)
+#define RGXFWIF_GUEST_OFFSET_FWCCB (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+ RGXFW_SEGMMU_DATA_META_UNCACHED | \
+ RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+ 0x0300U)
+#define RGXFWIF_GUEST_OFFSET_FWCCBCTL (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+ RGXFW_SEGMMU_DATA_META_UNCACHED | \
+ RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+ 0x02C0U)
+#else
+/* In case of MIPS we will need to define proper values for these offsets */
+#define RGXFWIF_GUEST_OFFSET_KCCB (0x0)
+#define RGXFWIF_GUEST_OFFSET_KCCBCTL (0x0)
+#define RGXFWIF_GUEST_OFFSET_FWCCB (0x0)
+#define RGXFWIF_GUEST_OFFSET_FWCCBCTL (0x0)
+#endif
+
+#endif
+
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE RGXFWIF_DLLIST_NODE;
+#else
+typedef struct {RGXFWIF_DEV_VIRTADDR p;
+ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE;
+#endif
+
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_INIT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMPCHECKS;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ALIGNCHECK;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE;
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+ Firmware memory context.
+*/
+typedef struct _RGXFWIF_FWMEMCONTEXT_
+{
+ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
+ IMG_INT32 uiPageCatBaseRegID; /*!< associated page catalog base register (-1 == unallocated) */
+ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */
+ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */
+ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid;
+ IMG_BOOL bOSidAxiProt;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+
+/*!
+ * FW context state flags
+ */
+#define RGXFWIF_CONTEXT_TAFLAGS_NEED_RESUME (0x00000001)
+#define RGXFWIF_CONTEXT_RENDERFLAGS_NEED_RESUME (0x00000002)
+#define RGXFWIF_CONTEXT_CDMFLAGS_NEED_RESUME (0x00000004)
+#define RGXFWIF_CONTEXT_SHGFLAGS_NEED_RESUME (0x00000008)
+#define RGXFWIF_CONTEXT_TDMFLAGS_CONTEXT_STORED (0x00000010)
+#define RGXFWIF_CONTEXT_ALLFLAGS_NEED_RESUME (0x0000001F)
+
+
+typedef struct _RGXFWIF_TACTX_STATE_
+{
+ /* FW-accessible TA state which must be written out to memory on context store */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /* To store in mid-TA */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /* Initial value (in case it is 'lost' due to a lock-up) */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_BATCH;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM0;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM1;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM2;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM3;
+#if defined(SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB)
+ IMG_UINT16 RGXFW_ALIGN ui16TACurrentIdx;
+#endif
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+
+typedef struct _RGXFWIF_3DCTX_STATE_
+{
+ /* FW-accessible ISP state which must be written out to memory on context store */
+ IMG_UINT32 RGXFW_ALIGN au3DReg_ISP_STORE[64];
+ IMG_UINT64 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS;
+ IMG_UINT64 RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS;
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+
+
+typedef struct _RGXFWIF_COMPUTECTX_STATE_
+{
+ IMG_BOOL RGXFW_ALIGN bBufferB;
+} RGXFWIF_COMPUTECTX_STATE;
+
+
+typedef struct _RGXFWIF_VRDMCTX_STATE_
+{
+ /* FW-accessible TA state which must be written out to memory on context store */
+ IMG_UINT64 RGXFW_ALIGN uVRDMReg_VRM_CALL_STACK_POINTER;
+ IMG_UINT64 RGXFW_ALIGN uVRDMReg_VRM_BATCH;
+} UNCACHED_ALIGN RGXFWIF_VRDMCTX_STATE;
+
+
+typedef struct _RGXFWIF_FWCOMMONCONTEXT_
+{
+ /*
+ Used by bg and irq context
+ */
+ /* CCB details for this firmware context */
+ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */
+ PRGXFWIF_CCCB psCCB; /*!< CCB base */
+ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr;
+
+ /*
+ Used by the bg context only
+ */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */
+
+ /*
+ Used by the irq context only
+ */
+ RGXFWIF_DLLIST_NODE sRunNode; /*!< List entry for the run list */
+
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+
+ /* Context suspend state */
+ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */
+
+ /* Framework state
+ */
+ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */
+
+ /*
+ * Flags e.g. for context switching
+ */
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32PrioritySeqNum;
+ IMG_UINT64 RGXFW_ALIGN ui64MCUFenceAddr;
+
+ /* References to the host side originators */
+ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */
+ IMG_UINT32 ui32PID; /*!< associated process ID */
+
+ /* Statistic updates waiting to be passed back to the host... */
+ IMG_BOOL bStatsPending; /*!< True when some stats are pending */
+ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */
+ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */
+ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */
+ RGXFWIF_DM eDM; /*!< Data Master type */
+ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */
+ RGXFWIF_DLLIST_NODE sWaitSignalNode; /*!< List entry for the wait-signal list */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */
+ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */
+ IMG_UINT64 RGXFW_ALIGN ui64ResumeSignalAddr; /*!< Address of the Services Signal for resuming the buffer */
+ IMG_BOOL bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/*!
+ Firmware render context.
+*/
+typedef struct _RGXFWIF_FWRENDERCONTEXT_
+{
+ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */
+ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */
+
+ /*
+ * Note: The following fields keep track of OOM and partial render statistics.
+ * Because these data structures are allocated as cache-incoherent memory,
+ * and because these fields are updated by the firmware,
+ * the host will read valid values only after an SLC flush/inval.
+ * This is only guaranteed to happen while destroying the render-context.
+ */
+
+ /* The following variable has been reused to avoid breaking compatibility.
+ *
+ * It was previously:
+ * IMG_UINT32 ui32TotalNumPartialRenders; Total number of partial renders
+ *
+ * And is changed to:
+ */
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+ IMG_UINT32 ui32TotalNumOutOfMemory; /*!< Total number of OOMs */
+
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+ Firmware ray tracing context.
+*/
+typedef struct _RGXFWIF_FWRAYCONTEXT_
+{
+ IMG_UINT32 ui32ActiveFCMask; /* kept here so that fwrayctx and shgctx do not end up with the same address */
+ IMG_UINT32 ui32NextFC;
+ RGXFWIF_FWCOMMONCONTEXT sSHGContext; /*!< Firmware context for the SHG */
+ RGXFWIF_FWCOMMONCONTEXT sRTUContext; /*!< Firmware context for the RTU */
+ PRGXFWIF_CCCB_CTL psCCBCtl[DPX_MAX_RAY_CONTEXTS];
+ PRGXFWIF_CCCB psCCB[DPX_MAX_RAY_CONTEXTS];
+} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT;
+
+#define RGXFWIF_INVALID_FRAME_CONTEXT (0xFFFFFFFF)
+
+/*!
+ BIF tiling mode
+*/
+typedef enum _RGXFWIF_BIFTILINGMODE_
+{
+ RGXFWIF_BIFTILINGMODE_NONE = 0,
+ RGXFWIF_BIFTILINGMODE_256x16 = 0,
+ RGXFWIF_BIFTILINGMODE_512x8 = 1
+} RGXFWIF_BIFTILINGMODE;
+
+/*!
+ BIF requester selection
+*/
+typedef enum _RGXFWIF_BIFREQ_
+{
+ RGXFWIF_BIFREQ_TA = 0,
+ RGXFWIF_BIFREQ_3D = 1,
+ RGXFWIF_BIFREQ_CDM = 2,
+ RGXFWIF_BIFREQ_2D = 3,
+ RGXFWIF_BIFREQ_TDM = 3,
+ RGXFWIF_BIFREQ_HOST = 4,
+ RGXFWIF_BIFREQ_RTU = 5,
+ RGXFWIF_BIFREQ_SHG = 6,
+ RGXFWIF_BIFREQ_MAX = 7
+} RGXFWIF_BIFREQ;
+
+typedef enum _RGXFWIF_PM_DM_
+{
+ RGXFWIF_PM_DM_TA = 0,
+ RGXFWIF_PM_DM_3D = 1,
+} RGXFWIF_PM_DM;
+
+typedef enum _RGXFWIF_RPM_DM_
+{
+ RGXFWIF_RPM_DM_SHF = 0,
+ RGXFWIF_RPM_DM_SHG = 1,
+ RGXFWIF_RPM_DM_MAX,
+} RGXFWIF_RPM_DM;
+
+/*!
+ ******************************************************************************
+ * Kernel CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCB_CTL_
+{
+ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */
+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
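+
+/*
+ * The wrap mask turns offset arithmetic into a simple AND (which assumes the
+ * CCB capacity is a power of two). A typical producer-side update
+ * (illustrative only, variable names hypothetical) would be:
+ *
+ *   ui32NewWrite = (psCtl->ui32WriteOffset + 1) & psCtl->ui32WrapMask;
+ *   if (ui32NewWrite != psCtl->ui32ReadOffset)
+ *       psCtl->ui32WriteOffset = ui32NewWrite;   [otherwise the CCB is full]
+ */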
+
+/*!
+ ******************************************************************************
+ * Kernel CCB command structure for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4) /* MMU_CTRL_INVAL_PC_EN */
+
+#if !defined(__KERNEL__)
+
+#if !defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C) (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C) ((C) << 0x3) /* MMU_CTRL_INVAL_CONTEXT_SHIFT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C) ((C) << 0x3) /* MMU_CTRL_INVAL_CONTEXT_SHIFT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000) /* indicates FW should interrupt the host */
+
+typedef struct _RGXFWIF_MMUCACHEDATA_
+{
+ PRGXFWIF_FWMEMCONTEXT psMemoryContext;
+ IMG_UINT32 ui32Flags;
+ RGXFWIF_DEV_VIRTADDR sMMUCacheSync;
+ IMG_UINT32 ui32MMUCacheSyncUpdateValue;
+} RGXFWIF_MMUCACHEDATA;
+
+typedef struct _RGXFWIF_SLCBPCTLDATA_
+{
+ IMG_BOOL bSetBypassed; /*!< Should SLC be/not be bypassed for indicated units? */
+ IMG_UINT32 uiFlags; /*!< Units to enable/disable */
+} RGXFWIF_SLCBPCTLDATA;
+
+#define RGXFWIF_BPDATA_FLAGS_WRITE (1 << 0)
+#define RGXFWIF_BPDATA_FLAGS_CTL (1 << 1)
+#define RGXFWIF_BPDATA_FLAGS_REGS (1 << 2)
+
+typedef struct _RGXFWIF_FWBPDATA_
+{
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */
+ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */
+ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */
+ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS 4
+
+typedef struct _RGXFWIF_KCCB_CMD_KICK_DATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */
+ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */
+ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+ PRGXFWIF_WORKLOAD_DATA sWorkloadDataFWAddress; /*!< deprecated, kept for compatibility. */
+ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+typedef struct _RGXFWIF_KCCB_CMD_FENCE_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr;
+ IMG_UINT32 uiUpdateVal;
+} RGXFWIF_KCCB_CMD_SYNC_DATA;
+
+typedef enum _RGXFWIF_CLEANUP_TYPE_
+{
+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */
+ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */
+ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */
+ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */
+ RGXFWIF_CLEANUP_HWFRAMEDATA, /*!< FW RPM/RTU frame data */
+ RGXFWIF_CLEANUP_RPM_FREELIST, /*!< FW RPM freelist */
+} RGXFWIF_CLEANUP_TYPE;
+
+#define RGXFWIF_CLEANUP_RUN (1 << 0) /*!< The requested cleanup command has run on the FW */
+#define RGXFWIF_CLEANUP_BUSY (1 << 1) /*!< The requested resource is busy */
+
+typedef struct _RGXFWIF_CLEANUP_REQUEST_
+{
+ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */
+ union {
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */
+ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */
+ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */
+ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData; /*!< RPM/RTU frame data to cleanup */
+ PRGXFWIF_RPM_FREELIST psRPMFreelist; /*!< RPM Freelist to cleanup */
+ } uCleanupData;
+ RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr; /*!< sync primitive used to indicate state of the request */
+} RGXFWIF_CLEANUP_REQUEST;
+
+typedef enum _RGXFWIF_POWER_TYPE_
+{
+ RGXFWIF_POW_OFF_REQ = 1,
+ RGXFWIF_POW_FORCED_IDLE_REQ,
+ RGXFWIF_POW_NUMDUST_CHANGE,
+ RGXFWIF_POW_APM_LATENCY_CHANGE
+} RGXFWIF_POWER_TYPE;
+
+typedef enum
+{
+ RGXFWIF_OS_ONLINE = 1,
+ RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+typedef struct _RGXFWIF_POWER_REQUEST_
+{
+ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */
+ union
+ {
+ IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */
+ IMG_BOOL bForced; /*!< If the operation is mandatory */
+ IMG_BOOL bCancelForcedIdle; /*!< If the operation is to cancel previously forced idle */
+ IMG_UINT32 ui32ActivePMLatencyms; /*!< Number of milliseconds to set APM latency */
+ } uPoweReqData;
+ IMG_BOOL bNotifyTimeout; /*!< Notify the FW that the host has timed out waiting for the forced-idle request to complete.
+ It is placed at the end of the struct to preserve backwards compatibility */
+} RGXFWIF_POWER_REQUEST;
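+
+/*
+ * Illustrative sketch: how the host side might fill in a power request asking
+ * the firmware to change the number of active Dusts. Only fields declared in
+ * RGXFWIF_POWER_REQUEST are touched; the function name is hypothetical.
+ */
+static inline void ExamplePowRequestNumDusts(RGXFWIF_POWER_REQUEST *psPowReq,
+                                             IMG_UINT32 ui32NumDusts)
+{
+    psPowReq->ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+    psPowReq->uPoweReqData.ui32NumOfDusts = ui32NumDusts;
+    psPowReq->bNotifyTimeout = IMG_FALSE;
+}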
+
+typedef struct _RGXFWIF_SLCFLUSHINVALDATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing it */
+ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */
+ RGXFWIF_DM eDM; /*!< DM to flush entries for (only useful when bDMContext == TRUE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
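+
+/*
+ * A minimal usage sketch: preparing an SLC flush/invalidate request scoped to
+ * a single DM context. The function and parameter names are hypothetical.
+ */
+static inline void ExampleBuildSLCFlushInval(RGXFWIF_SLCFLUSHINVALDATA *psData,
+                                             PRGXFWIF_FWCOMMONCONTEXT psContext,
+                                             RGXFWIF_DM eDM)
+{
+    psData->psContext = psContext;
+    psData->bInval = IMG_TRUE;     /* invalidate as well as flush */
+    psData->bDMContext = IMG_TRUE; /* the data belongs to a specific DM context */
+    psData->eDM = eDM;
+}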
+
+typedef struct _RGXFWIF_STATEFLAG_CTRL_
+{
+ IMG_BOOL bSetNotClear; /*!< Set or clear config flags */
+ IMG_UINT32 ui32Config; /*!< Mask of config flags to change */
+ RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr; /*!< sync primitive used to return the status */
+} RGXFWIF_STATEFLAG_CTRL;
+
+typedef struct _RGXFWIF_HCS_CTL_
+{
+ IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds a context switch (C/S) is allowed to last */
+} RGXFWIF_HCS_CTL;
+
+typedef struct _RGXFWIF_HWPERF_CTRL_
+{
+ IMG_BOOL bToggle; /*!< Toggle masked bits or apply full mask? */
+ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct _RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS_
+{
+ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct _RGXFWIF_CORECLKSPEEDCHANGE_DATA_
+{
+ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16
+
+typedef struct _RGXFWIF_HWPERF_CTRL_BLKS_
+{
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
+ IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
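+
+/*
+ * Illustrative sketch: enabling a set of already-configured HWPerf counter
+ * blocks by ID. The function and parameter names are hypothetical.
+ */
+static inline void ExampleEnableHWPerfBlocks(RGXFWIF_HWPERF_CTRL_BLKS *psCtrl,
+                                             const IMG_UINT16 *pui16BlockIDs,
+                                             IMG_UINT32 ui32NumBlocks)
+{
+    IMG_UINT32 i;
+
+    if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+    {
+        ui32NumBlocks = RGXFWIF_HWPERF_CTRL_BLKS_MAX;
+    }
+
+    psCtrl->bEnable = IMG_TRUE;
+    psCtrl->ui32NumBlocks = ui32NumBlocks;
+    for (i = 0; i < ui32NumBlocks; i++)
+    {
+        psCtrl->aeBlockIDs[i] = pui16BlockIDs[i];
+    }
+}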
+
+
+typedef struct _RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS_
+{
+ IMG_UINT16 ui16CustomBlock;
+ IMG_UINT16 ui16NumCounters;
+ PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+typedef struct _RGXFWIF_ZSBUFFER_BACKING_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
+ IMG_UINT32 bDone; /*!< Whether the backing/unbacking action succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+typedef struct
+{
+ IMG_UINT32 ui32IsolationPriorityThreshold;
+} RGXFWIF_OSID_ISOLATION_GROUP_DATA;
+
+/*
+ * Flags to pass in the unused bits of the page size grow request
+ */
+#define RGX_FREELIST_GSDATA_RPM_RESTART_EN (1 << 31) /*!< Restart RPM after freelist grow command */
+#define RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK (0x3FFFFFU) /*!< Mask for page count. */
+
+typedef struct _RGXFWIF_FREELIST_GS_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
+ IMG_UINT32 ui32DeltaSize; /*!< Amount of the Freelist change */
+ IMG_UINT32 ui32NewSize; /*!< New amount of pages on the freelist */
+} RGXFWIF_FREELIST_GS_DATA;
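+
+/*
+ * Illustrative sketch: packing an RPM page count together with the restart
+ * flag using the mask/flag definitions above. Which field of the grow request
+ * carries the packed value is not shown here; the names are hypothetical.
+ */
+static inline IMG_UINT32 ExamplePackRPMGrowValue(IMG_UINT32 ui32Pages,
+                                                 IMG_BOOL bRestartRPM)
+{
+    IMG_UINT32 ui32Packed = ui32Pages & RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK;
+
+    if (bRestartRPM)
+    {
+        ui32Packed |= RGX_FREELIST_GSDATA_RPM_RESTART_EN;
+    }
+
+    return ui32Packed;
+}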
+
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000
+
+typedef struct _RGXFWIF_FREELISTS_RECONSTRUCTION_DATA_
+{
+ IMG_UINT32 ui32FreelistsCount;
+ IMG_UINT32 aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+
+typedef struct _RGXFWIF_SIGNAL_UPDATE_DATA_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sDevSignalAddress; /*!< device virtual address of the updated signal */
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA;
+
+
+typedef struct _RGXFWIF_WRITE_OFFSET_UPDATE_DATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following the write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+typedef struct _RGXFWIF_WORKEST_FWCCB_CMD_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken; /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+typedef struct _PDVFS_OPP_
+{
+ IMG_UINT32 ui32Volt; /* V */
+ IMG_UINT32 ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+typedef struct _RGXFWIF_PDVFS_OPP_
+{
+ PDVFS_OPP asOPPValues[NUM_OPP_VALUES];
+ IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
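+
+/*
+ * Illustrative sketch: populating the proactive DVFS OPP table from
+ * hypothetical host-supplied voltage/frequency arrays. ui32MaxOPPPoint is
+ * assumed here to be the index of the highest valid entry.
+ */
+static inline void ExampleFillOPPTable(RGXFWIF_PDVFS_OPP *psOPPTable,
+                                       const IMG_UINT32 *pui32Volt,
+                                       const IMG_UINT32 *pui32FreqHz,
+                                       IMG_UINT32 ui32NumOPPs)
+{
+    IMG_UINT32 i;
+
+    if (ui32NumOPPs > NUM_OPP_VALUES)
+    {
+        ui32NumOPPs = NUM_OPP_VALUES;
+    }
+
+    for (i = 0; i < ui32NumOPPs; i++)
+    {
+        psOPPTable->asOPPValues[i].ui32Volt = pui32Volt[i];
+        psOPPTable->asOPPValues[i].ui32Freq = pui32FreqHz[i];
+    }
+    psOPPTable->ui32MaxOPPPoint = (ui32NumOPPs > 0) ? (ui32NumOPPs - 1) : 0;
+}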
+
+typedef struct _RGXFWIF_PDVFS_OPP_DATA_
+{
+ RGXFWIF_PDVFS_OPP sPDFVSOppInfo;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP_DATA;
+
+typedef struct _RGXFWIF_PDVFS_MAX_FREQ_DATA_
+{
+ IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum _RGXFWIF_REGDATA_CMD_TYPE_
+{
+ RGXFWIF_REGCFG_CMD_ADD = 101,
+ RGXFWIF_REGCFG_CMD_CLEAR = 102,
+ RGXFWIF_REGCFG_CMD_ENABLE = 103,
+ RGXFWIF_REGCFG_CMD_DISABLE = 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef struct _RGXFWIF_REGCONFIG_DATA_
+{
+ RGXFWIF_REGDATA_CMD_TYPE eCmdType;
+ RGXFWIF_REG_CFG_TYPE eRegConfigType;
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct _RGXFWIF_REG_CFG_
+{
+ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_ALL];
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+typedef struct _RGXFWIF_REGISTER_GUESTOS_OFFSETS_
+{
+ IMG_UINT32 ui32OSid;
+ RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN sKCCBCtl;
+ RGXFWIF_DEV_VIRTADDR sKCCB;
+ RGXFWIF_DEV_VIRTADDR sFirmwareCCBCtl;
+ RGXFWIF_DEV_VIRTADDR sFirmwareCCB;
+} UNCACHED_ALIGN RGXFWIF_REGISTER_GUESTOS_OFFSETS;
+
+/* OSid Scheduling Priority Change */
+typedef struct _RGXFWIF_OSID_PRIORITY_DATA_
+{
+ IMG_UINT32 ui32OSidNum;
+ IMG_UINT32 ui32Priority;
+} RGXFWIF_OSID_PRIORITY_DATA;
+
+typedef struct
+{
+ IMG_UINT32 ui32OSid;
+ RGXFWIF_OS_STATE_CHANGE eNewOSState;
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+typedef struct
+{
+ PRGXFWIF_INIT sOSInit;
+} RGXFW_ALIGN RGXFWIF_OS_CONFIG_DATA;
+
+typedef enum _RGXFWIF_KCCB_CMD_TYPE_
+{
+ RGXFWIF_KCCB_CMD_KICK = 101,
+ RGXFWIF_KCCB_CMD_MMUCACHE = 102,
+ RGXFWIF_KCCB_CMD_BP = 104,
+ RGXFWIF_KCCB_CMD_SLCBPCTL = 106, /*!< slc bypass control. Requires sSLCBPCtlData. For validation */
+ RGXFWIF_KCCB_CMD_SYNC = 107, /*!< host sync command. Requires sSyncData. */
+ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 108, /*!< slc flush and invalidation request */
+ RGXFWIF_KCCB_CMD_CLEANUP = 109, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+ RGXFWIF_KCCB_CMD_POW = 110, /*!< Power request */
+ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 111, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 112, /*!< Configure, clear and enable multiple HWPerf blocks */
+ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 113, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 114, /*!< CORE clock speed change event */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 115, /*!< Backing for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 116, /*!< Unbacking for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 117, /*!< Freelist Grow done */
+ RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE = 118, /*!< Freelist Shrink done */
+ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 119, /*!< Freelists Reconstruction done */
+ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 120, /*!< Health check request */
+ RGXFWIF_KCCB_CMD_REGCONFIG = 121,
+ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 122, /*!< Configure the custom counters for HWPerf */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 123, /*!< Configure, clear and enable multiple HWPerf blocks during the init process*/
+ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 124, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+ RGXFWIF_KCCB_CMD_WORKEST_CLEAR_BUFFER = 125,
+ RGXFWIF_KCCB_CMD_PDVFS_PASS_OPP = 126,
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 127,
+ RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE = 129,
+ RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW = 130,
+
+ RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE = 131, /*!< Informs the firmware that the host has performed a signal update */
+
+ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 132, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+
+ RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 133, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 134, /*!< Set or clear firmware state flags */
+ RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE = 135, /*!< Set hard context switching deadline */
+ RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE = 136, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 137, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_OS_CFG_INIT = 138, /*!< First kick of the DDK which initializes all OS specific data on the FW */
+} RGXFWIF_KCCB_CMD_TYPE;
+
+/* Kernel CCB command packet */
+typedef struct _RGXFWIF_KCCB_CMD_
+{
+ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */
+ RGXFWIF_DM eDM; /*!< DM associated with the command */
+
+ union
+ {
+ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */
+ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMUCACHE command */
+ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */
+ RGXFWIF_SLCBPCTLDATA sSLCBPCtlData; /*!< Data for SLC Bypass Control */
+ RGXFWIF_KCCB_CMD_SYNC_DATA sSyncData; /*!< Data for host sync commands */
+ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */
+ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */
+ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */
+ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */
+ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */
+ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */
+ RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCORECLKSPEEDCHANGEData; /*!< Data for CORE clock speed change */
+ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */
+ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */
+ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */
+ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */
+ RGXFWIF_REGISTER_GUESTOS_OFFSETS sRegisterGuestOsOffests;/*!< Data for registering a guestOS with the FW */
+ RGXFWIF_SIGNAL_UPDATE_DATA sSignalUpdateData; /*!< Data for informing the FW about the signal update */
+ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+ RGXFWIF_PDVFS_OPP_DATA sPDVFSOppData;
+ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData;
+ RGXFWIF_OSID_PRIORITY_DATA sCmdOSidPriorityData; /*!< Data for updating an OSid priority */
+ RGXFWIF_STATEFLAG_CTRL sStateFlagCtrl; /*!< Data for StateFlag control command */
+ RGXFWIF_HCS_CTL sHCSCtrl; /*!< Data for Hard Context Switching */
+ RGXFWIF_OSID_ISOLATION_GROUP_DATA sCmdOSidIsolationData; /*!< Data for updating the OSid isolation group */
+ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */
+ RGXFWIF_OS_CONFIG_DATA sCmdOSConfigData; /*!< Data for the OS-specific initialization part of the FW */
+ } UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
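+
+/*
+ * Illustrative sketch: assembling a kernel CCB packet that asks the firmware
+ * to clean up a freelist. Queueing the packet is done elsewhere in the driver;
+ * RGXFWIF_DM_GP is assumed to be the general purpose DM value from the shared
+ * interface header, and the function name is hypothetical.
+ */
+static inline void ExampleBuildFreelistCleanupCmd(RGXFWIF_KCCB_CMD *psCmd,
+                                                  PRGXFWIF_FREELIST psFreelist,
+                                                  RGXFWIF_DEV_VIRTADDR sCleanupSync)
+{
+    psCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+    psCmd->eDM = RGXFWIF_DM_GP;
+    psCmd->uCmdData.sCleanupData.eCleanupType = RGXFWIF_CLEANUP_FREELIST;
+    psCmd->uCmdData.sCleanupData.uCleanupData.psFreelist = psFreelist;
+    psCmd->uCmdData.sCleanupData.sSyncObjDevVAddr = sCleanupSync;
+}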
+
+/*!
+ ******************************************************************************
+ * Firmware CCB command structure for RGX
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA_
+{
+ IMG_UINT32 ui32ZSBufferID;
+ IMG_BOOL bPopulate;
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA_
+{
+ IMG_UINT32 ui32FreelistID;
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA_
+{
+ IMG_UINT32 ui32FreelistsCount;
+ IMG_UINT32 ui32HwrCounter;
+ IMG_UINT32 aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA_
+{
+ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */
+ RGXFWIF_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */
+ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */
+ IMG_BOOL bPageFault; /*!< Whether a page fault happened */
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< Page catalogue address associated with the fault */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+typedef enum _RGXFWIF_FWCCB_CMD_TYPE_
+{
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */
+ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105, /*!< Notifies host of a HWR event on a context */
+ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106, /*!< Requests an on-demand debug dump */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107, /*!< Requests an on-demand update on process stats */
+
+ RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW = 108, /*!< Requests an on-demand RPM freelist grow */
+ RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED = 109, /*!< Supplies data for the workload matching algorithm */
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 110,
+ RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM = 111,
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+typedef enum
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumSHStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+
+/* Firmware CCB command packet */
+
+typedef struct
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
+ IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
+ IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+/*!
+ ******************************************************************************
+ * Workload Estimation Structures
+ *****************************************************************************/
+
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN /*uintptr_t DEVMEM_MEMDESC*/ ui64WorkloadDataMemdesc;
+} RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA_
+{
+ IMG_UINT32 ui32CoreClkRate;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64MemDesc;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_
+{
+ RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
+ union
+ {
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/
+ RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
+ RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED_DATA sCmdWorkEstWorkloadFinished; /*!< Data for workload matching */
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange;
+ RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA sCmdPDVFSFreeMem;
+ } RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
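+
+/*
+ * Illustrative sketch: host-side dispatch of a firmware CCB packet. Only two
+ * command types are handled and the comments indicate where the relevant
+ * payload lives; the function name is hypothetical.
+ */
+static inline void ExampleProcessFWCCBCmd(const RGXFWIF_FWCCB_CMD *psCmd)
+{
+    switch (psCmd->eCmdType)
+    {
+    case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+        /* psCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID identifies the
+         * ZS buffer that needs physical backing */
+        break;
+    case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+        /* psCmd->uCmdData.sCmdFreeListGS.ui32FreelistID identifies the
+         * freelist to grow on demand */
+        break;
+    default:
+        break;
+    }
+}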
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct _RGXFWIF_SIGBUF_CTL_
+{
+ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */
+ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct _RGXFWIF_RUNTIME_CFG_
+{
+ IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */
+ IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */
+ IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */
+ IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */
+ PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999)
+
+#if defined(PDUMP)
+
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32
+
+typedef enum _RGXFWIF_PID_FILTER_MODE_
+{
+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+typedef struct _RGXFWIF_PID_FILTER_ITEM_
+{
+ IMG_PID uiPID;
+ IMG_UINT32 ui32OSID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct _RGXFWIF_PID_FILTER_
+{
+ RGXFWIF_PID_FILTER_MODE eMode;
+ /* Each process in the filter list is specified by a PID and OS ID pair.
+ * Each PID and OS ID pair is an item in the items array (asItems).
+ * If the array contains fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries
+ * then it must be terminated by an item with a PID of zero.
+ */
+ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
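+
+/*
+ * Usage sketch (illustrative only): building a filter that excludes every
+ * process except two given PIDs, terminated by a zero PID as the comment
+ * above requires. The function and parameter names are hypothetical.
+ */
+static inline void ExampleBuildPIDFilter(RGXFWIF_PID_FILTER *psFilter,
+                                         IMG_PID uiFirstPID, IMG_PID uiSecondPID,
+                                         IMG_UINT32 ui32OSID)
+{
+    psFilter->eMode = RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT;
+    psFilter->asItems[0].uiPID = uiFirstPID;
+    psFilter->asItems[0].ui32OSID = ui32OSID;
+    psFilter->asItems[1].uiPID = uiSecondPID;
+    psFilter->asItems[1].ui32OSID = ui32OSID;
+    psFilter->asItems[2].uiPID = 0; /* terminator for a partially filled array */
+}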
+#endif
+
+typedef struct _RGXFWIF_INIT_
+{
+ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr;
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sResultDumpBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sDPXControlStreamBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sRTUHeapBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTDMTPUYUVCeoffsHeapBase;
+
+ IMG_BOOL bFirstTA;
+ IMG_BOOL bFirstRender;
+ IMG_BOOL bFrameworkAfterInit;
+ IMG_BOOL bDisableFilterHWPerfCustomCounter;
+ RGXFWIF_DEV_VIRTADDR sPowerSync;
+ IMG_UINT32 ui32FilterFlags;
+
+ /* Kernel CCB */
+ PRGXFWIF_CCB_CTL psKernelCCBCtl;
+ PRGXFWIF_CCB psKernelCCB;
+
+ /* Firmware CCB */
+ PRGXFWIF_CCB_CTL psFirmwareCCBCtl;
+ PRGXFWIF_CCB psFirmwareCCB;
+
+ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX];
+
+ IMG_BOOL bEnableLogging;
+ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */
+ IMG_UINT32 ui32BreakpointTemps;
+ IMG_UINT32 ui32BreakpointShareds;
+ IMG_UINT32 ui32HWRDebugDumpLimit;
+
+ RGXFWIF_BIFTILINGMODE eBifTilingMode;
+ struct
+ {
+ IMG_UINT64 uiBase;
+ IMG_UINT64 uiLen;
+ IMG_UINT64 uiXStride;
+ } RGXFW_ALIGN sBifTilingCfg[RGXFWIF_NUM_BIF_TILING_CONFIGS];
+
+ PRGXFWIF_RUNTIME_CFG sRuntimeCfg;
+
+ PRGXFWIF_TRACEBUF sTraceBufCtl;
+ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter;
+
+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl;
+ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl;
+ PRGXFWIF_REG_CFG sRegCfg;
+ PRGXFWIF_HWPERF_CTL sHWPerfCtl;
+
+ RGXFWIF_DEV_VIRTADDR sAlignChecks;
+
+ /* Core clock speed at FW boot time */
+ IMG_UINT32 ui32InitialCoreClockSpeed;
+
+ /* APM latency in ms before signalling IDLE to the host */
+ IMG_UINT32 ui32ActivePMLatencyms;
+
+ /* Flag to be set by the Firmware after successful start */
+ IMG_BOOL bFirmwareStarted;
+
+ IMG_UINT32 ui32MarkerVal;
+
+ IMG_UINT32 ui32FirmwareStartedTimeStamp;
+
+ IMG_UINT32 ui32JonesDisableMask;
+
+ /* Compatibility checks to be populated by the Firmware */
+ RGXFWIF_COMPCHECKS sRGXCompChecks;
+
+ RGXFWIF_DMA_ADDR sCorememDataStore;
+
+ FW_PERF_CONF eFirmwarePerf;
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr;
+
+ RGXFWIF_DEV_VIRTADDR sT1Stack;
+
+ RGXFWIF_PDVFS_OPP sPDVFSOPPInfo;
+
+ /**
+ * FW pointer to memory containing the core clock rate in Hz.
+ * The firmware (PDVFS) updates this memory when running on a non-primary FW
+ * thread in order to communicate the rate to the host driver.
+ */
+ PRGXFWIF_CORE_CLK_RATE sCoreClockRate;
+
+#if defined(PDUMP)
+ RGXFWIF_PID_FILTER sPIDFilter;
+#endif
+
+ /* Workload Estimation Firmware CCB */
+ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl;
+ PRGXFWIF_CCB psWorkEstFirmwareCCB;
+
+} UNCACHED_ALIGN RGXFWIF_INIT;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+typedef struct _RGXFWIF_CMD_PRIORITY_
+{
+ IMG_UINT32 ui32Priority;
+} RGXFWIF_CMD_PRIORITY;
+
+#endif /* __RGX_FWIF_KM_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_fwif_resetframework.h
+@Title Post-reset work-around framework FW interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_FWIF_RESETFRAMEWORK_H)
+#define _RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct _RGXFWIF_RF_REGISTERS_
+{
+#if RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2
+ IMG_UINT64 uCDMReg_CDM_CB_QUEUE;
+ IMG_UINT64 uCDMReg_CDM_CB_BASE;
+ IMG_UINT64 uCDMReg_CDM_CB;
+#else
+ IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE;
+#endif
+} RGXFWIF_RF_REGISTERS;
+
+#define RGXFWIF_RF_FLAG_ENABLE 0x00000001 /*!< enables the reset framework in the firmware */
+
+typedef struct _RGXFWIF_RF_CMD_
+{
+ IMG_UINT32 ui32Flags;
+
+ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+ RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* Size used by the kernel to opaquely allocate and copy the reset framework command */
+#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD)
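+
+/*
+ * Illustrative sketch: zero-initialising an opaque reset framework command of
+ * RGXFWIF_RF_CMD_SIZE bytes and enabling the framework. Capturing the CDM
+ * registers is build-dependent and not shown; the function name is
+ * hypothetical.
+ */
+static inline void ExampleInitRFCmd(RGXFWIF_RF_CMD *psRFCmd)
+{
+    IMG_UINT8  *pui8Cmd = (IMG_UINT8 *) psRFCmd;
+    IMG_UINT32 i;
+
+    for (i = 0; i < RGXFWIF_RF_CMD_SIZE; i++)
+    {
+        pui8Cmd[i] = 0;
+    }
+
+    psRFCmd->ui32Flags = RGXFWIF_RF_FLAG_ENABLE;
+}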
+
+#endif /* _RGX_FWIF_RESETFRAMEWORK_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_fwif_sf.h
+@Title RGX firmware interface string format specifiers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX firmware logging messages. The following
+ list contains the messages the firmware prints. Changes other
+ than to the first column, or to fix spelling mistakes in the
+ strings, will break compatibility with log files created with
+ older/newer firmware versions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_SF_H
+#define RGX_FWIF_SF_H
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ * WILL BREAK fw tracing message compatibility with previous
+ * fw versions. Only add new ones, if so required.
+ ****************************************************************************/
+/* Available log groups */
+#define RGXFW_LOG_SFGROUPLIST \
+ X(RGXFW_GROUP_NULL,NULL) \
+ X(RGXFW_GROUP_MAIN,MAIN) \
+ X(RGXFW_GROUP_CLEANUP,CLEANUP) \
+ X(RGXFW_GROUP_CSW,CSW) \
+ X(RGXFW_GROUP_PM, PM) \
+ X(RGXFW_GROUP_RTD,RTD) \
+ X(RGXFW_GROUP_SPM,SPM) \
+ X(RGXFW_GROUP_MTS,MTS) \
+ X(RGXFW_GROUP_BIF,BIF) \
+ X(RGXFW_GROUP_DATA,DATA) \
+ X(RGXFW_GROUP_POW,POW) \
+ X(RGXFW_GROUP_HWR,HWR) \
+ X(RGXFW_GROUP_HWP,HWP) \
+ X(RGXFW_GROUP_RPM,RPM) \
+ X(RGXFW_GROUP_DMA,DMA) \
+ X(RGXFW_GROUP_DBG,DBG)
+
+enum RGXFW_LOG_SFGROUPS {
+#define X(A,B) A,
+ RGXFW_LOG_SFGROUPLIST
+#undef X
+};
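+
+/*
+ * Illustrative sketch of the X-macro pattern used above: expanding
+ * RGXFW_LOG_SFGROUPLIST a second time yields a printable name for each log
+ * group. The function name is a hypothetical placeholder, not part of this
+ * interface.
+ */
+static inline const char *ExampleGroupName(enum RGXFW_LOG_SFGROUPS eGroup)
+{
+    static const char *const apszNames[] = {
+#define X(A,B) #B,
+        RGXFW_LOG_SFGROUPLIST
+#undef X
+    };
+
+    return apszNames[eGroup];
+}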
+
+/* Table of String Format specifiers, the group each belongs to and the number
+ * of arguments each expects. X-macro style macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id : id within a group
+ * gid : group id
+ * Sym name : name of the enumeration used to identify the message string
+ * String : the actual format string
+ * #args : number of arguments the format string requires
+ */
+#define RGXFW_LOG_SFIDLIST \
+/*id, gid, id name, string, # arguments */ \
+X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string\n", 0) \
+\
+X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d\n", 6) \
+X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d, prio: %d\n", 4) \
+X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished\n", 0) \
+X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished\n", 0) \
+X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d\n", 7) \
+X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished\n", 0) \
+X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render\n", 0) \
+X(10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render\n", 0) \
+X(11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x\n", 2) \
+X(12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished\n", 0) \
+X(14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8X\n", 3) \
+X(16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx %08.8X @ %d\n", 2) \
+X(17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded\n", 0) \
+X(19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx %08.8X\n", 1) \
+X(21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= ????????, [%08.8X] is ???????? requires %08.8X\n", 4) \
+X(22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx %08.8X @ %d\n", 2) \
+X(23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of: \n", 1) \
+X(25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW\n", 0) \
+X(28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.\n", 0) \
+X(29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u\n", 1) \
+X(30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = %X, fw = %X\n", 3) \
+X(31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered\n", 0) \
+X(32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler\n", 2) \
+X(33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8X\n", 1) \
+X(34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state\n", 0) \
+X(35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers\n", 0) \
+X(36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u\n", 1) \
+X(37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets\n", 1) \
+X(38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER, "Estimated Power 0x%x\n", 1) \
+X(39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u\n", 1) \
+X(40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u\n", 2) \
+X(41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK, "HWR sizes check %d failed: addresses = %d, sizes = %d\n", 3) \
+X(42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%X\n", 1) \
+X(43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X(44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d\n", 2) \
+X(45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down\n", 0) \
+X(46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)\n", 2) \
+X(47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)\n", 0) \
+X(48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)\n", 0) \
+X(49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: %08.8X \n", 2) \
+X(50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d\n", 7) \
+X(51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x\n", 3) \
+X(52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK\n", 1) \
+X(53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty\n", 1) \
+X(54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08X%08X, VCE=0x%08x%08X, ALIST=0x%08x%08X, IsTA=%d\n", 8) \
+X(55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick\n", 0) \
+X(56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device\n", 1) \
+X(57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8X DM%u\n", 2) \
+X(58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED, "RDM finished on context %u\n", 1) \
+X(60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED, "SHG finished\n", 0) \
+X(62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED, "FBA finished on context %u\n", 1) \
+X(63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed\n", 0) \
+X(64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start\n", 1) \
+X(65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete\n", 1) \
+X(66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE, "FC%u cCCB Woff update = %u\n", 2) \
+X(67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_INIT, "Sidekick init\n", 0) \
+X(69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_INIT, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d\n", 3) \
+X(71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x\n", 3) \
+X(72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)\n", 1) \
+X(73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.\n", 0) \
+X(74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU has locked up (see HWR logs for more info)\n", 0) \
+X(75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)\n", 0) \
+X(76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)\n", 0) \
+X(77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM, "Doppler out of memory event for FC %u\n", 1) \
+X(78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [%08.8X]\n", 1) \
+X(81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx %08.8X @ %d\n", 2) \
+X(82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8X @ %d\n", 2) \
+X(84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM, "RPM Out of memory! Context 0x%08x, SH requestor %d\n", 2) \
+X(85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD, "Discard RTU due to RPM abort: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)\n", 4) \
+X(88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d\n", 1) \
+X(92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d\n", 1) \
+X(93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz\n", 1) \
+X(94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE, "Signal check failed, Required Data: 0x%X, Address: 0x%08x%08x\n", 3) \
+X(96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x\n", 5) \
+X(97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled\n", 0) \
+X(99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x\n", 4) \
+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x\n", 4) \
+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM, "DM: %u signal check failed\n", 1) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished\n", 0) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08X 0x%08X)\n", 4) \
+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT, "BRN 54141 HIT\n", 0) \
+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA, "BRN 54141 Dummy TA kicked\n", 0) \
+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA, "BRN 54141 resume TA\n", 0) \
+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT, "BRN 54141 double hit after applying WA\n", 0) \
+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x\n", 2) \
+X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%X, Current Data: 0x%X, Address: 0x%08x%08x\n", 4) \
+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled (Roff = %u, Woff = %u)\n", 2) \
+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx %08.8X\n", 1) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u \n", 3) \
+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed\n", 0) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, ext:0x%08X, int:0x%08X)\n", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx 0x%08.8X @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.\n", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.\n", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.\n", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed\n", 1) \
+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).\n", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET, "HCS changed to %d ms\n", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)\n", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d\n", 2) \
+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF, "Isolation grouping is disabled \n", 0) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF, "Isolation group configured with a priority threshold of %d\n", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE, "OS %d has come online \n", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE, "OS %d has gone offline \n", 1) \
+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes 0, Woff = %u, Size = %u)\n", 6) \
+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u, StreamStartOffset = %u)\n", 5) \
+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_DEINIT, "Sidekick deinit\n", 0) \
+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_DEINIT, "Rascal+Dusts deinit\n", 0) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x\n", 2) \
+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store\n", 0) \
+\
+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d\n", 2) \
+X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u\n", 1) \
+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%X\n", 3) \
+X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u\n", 1) \
+X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL, "Kick MTS Bg task DM=All\n", 0) \
+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d \n", 1) \
+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d\n", 2) \
+X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x\n", 2) \
+X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = %x, cmd = %x\n", 3) \
+X(10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x\n", 3) \
+X(11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE, "Ready queue debug DM = %u, celltype = %d, OSid = %u\n", 3) \
+X(12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task DM = %u, counted = %d, OSid = %u\n", 3 ) \
+X(13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u\n", 1) \
+X(14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.\n", 0) \
+\
+X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned\n", 1) \
+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d\n", 3) \
+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "HWRTData [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d\n", 3) \
+X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy\n", 2) \
+X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned\n", 2) \
+X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned\n", 1) \
+X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned\n", 1) \
+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d\n", 3) \
+X(10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X(12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d\n", 3) \
+X(13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned\n", 2) \
+\
+X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8X needs resume\n", 1) \
+X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%X\n", 1) \
+X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete\n", 0) \
+X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start\n", 0) \
+X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset\n", 0) \
+X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8X needs resume\n", 1) \
+X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8X resume\n", 1) \
+X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete\n", 0) \
+X(10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8X 0x%08.8X 0x%08.8X\n", 3) \
+X(11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start\n", 0) \
+X(12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8X resume\n", 1) \
+X(13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8X needs resume\n", 1) \
+X(14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete\n", 0) \
+X(17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start\n", 0) \
+X(18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d\n", 3) \
+X(19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u\n", 2) \
+X(20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe%d state: 0x%08.8X\n", 2) \
+X(21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe%d state: 0x%08.8X\n", 2) \
+X(22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME, "SHG FWCtx 0x%08.8X needs resume\n", 1) \
+X(23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME, "*** SHG FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED, "SHG context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE, "*** SHG context store complete\n", 0) \
+X(26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START, "*** SHG context store start\n", 0) \
+X(27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d\n", 1) \
+X(28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.\n", 0) \
+X(29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X, shader state %u\n", 4) \
+X(30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)\n", 2) \
+X(31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT, "TA context store hit BRN 52563: vertex store tasks outstanding\n", 0) \
+X(32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)\n", 1) \
+X(33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED, "TA context store deferred due to BRN 54141.", 0) \
+X(34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u\n", 7) \
+X(35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start\n", 0) \
+X(36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete\n", 0) \
+X(37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME, "TDM context needs resume, header [%08.8X, %08.8X]\n", 2) \
+\
+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE, "Activate MemCtx=0x%08x BIFreq=%d secure=%d\n", 3) \
+X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x \n", 1) \
+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d\n", 1) \
+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d\n", 2) \
+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d\n", 2) \
+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x\n", 6) \
+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST, "Trust enabled:%d, for BIFreq=%d\n", 2) \
+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG, "BIF Tiling Cfg %d base %08x%08x len %08x%08x enable %d stride %d --> %08x%08x\n", 9) \
+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now %08x %08x\n", 4) \
+X(10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now %04x\n", 3) \
+X(11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = %x, Reg index = %u, Bitshift index = %u, Val = %08x%08x\n", 7) \
+\
+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)\n", 10) \
+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d\n", 8) \
+X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED, "Grow for freelist ID=0x%08x denied by host\n", 1) \
+X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed\n", 1) \
+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d\n", 2) \
+X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: %08x, status(1:success, 0:fail): %d\n", 2)\
+X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x\n", 1) \
+\
+X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW, "RPM request failed. Waiting for freelist grow.\n", 0) \
+X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT, "RPM request failed. Aborting the current frame.\n", 0) \
+X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW, "RPM waiting for pending grow on freelist 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW, "Request freelist grow [0x%08x] current pages %d, grow size %d\n", 3) \
+X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD, "Freelist load: SHF = 0x%08x, SHG = 0x%08x\n", 2) \
+X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08X.%08X\n", 2) \
+X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08X.%08X\n", 2) \
+X(10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)\n", 5) \
+X(11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART, "Restarting SHG\n", 0) \
+X(12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED, "Grow failed, aborting the current frame.\n", 0) \
+X(13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE, "RPM abort complete on HWFrameData [0x%08x].\n", 1) \
+X(14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT, "RPM freelist cleanup [0x%08x] requires abort to proceed.\n", 1) \
+X(15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT, "RPM page table base register: 0x%08X.%08X\n", 2) \
+X(16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT, "Issuing RPM abort.\n", 0) \
+X(17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL, "RPM OOM received but toggle bits indicate free pages available\n", 0) \
+X(18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT, "RPM hardware timeout. Unable to process OOM event.\n", 0) \
+X(19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD, "SHF FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD, "SHG FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE, "SHF FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+X(22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE, "SHG FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+\
+X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u\n", 2) \
+X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u\n", 2) \
+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d, mmu: %d\n", 4) \
+X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D, "Loading VFP table 0x%08x%08x for 3D\n", 2) \
+X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA, "Loading VFP table 0x%08x%08x for TA\n", 2) \
+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store\n", 0) \
+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No\n", 2) \
+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No\n", 2) \
+X(10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x\n", 5) \
+X(13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X(14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u\n", 2) \
+X(15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u\n", 2) \
+X(16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG, "Freelist 0x%X RESET!!!!!!!!\n", 1) \
+X(19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2, "Freelist 0x%X stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 5) \
+X(20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%X type: %d (0:local,1:global,2:mmu) on HW context %u\n", 3) \
+X(21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)\n", 1) \
+X(22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed\n", 0) \
+X(23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d\n", 3) \
+X(24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)\n", 3) \
+X(25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)\n", 8) \
+X(26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED, "3D RTData 0x%08x loaded on HW context %u\n", 2) \
+X(27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)\n", 4) \
+X(28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+\
+X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render\n", 0) \
+X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render\n", 0) \
+X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL, "3D MemFree: Local FL 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU, "3D MemFree: MMU FL 0x%08x\n", 1) \
+X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL, "3D MemFree: Global FL 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [%08.8X] is %08.8X requires %08.8X, HardwareSync Fence [%08.8X] is %08.8X requires %08.8X\n", 6) \
+X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x\n", 3) \
+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x\n", 5) \
+X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED, "Partial render avoided\n", 0) \
+X(10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED, "Partial render discarded\n", 0) \
+X(11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished\n", 0) \
+X(12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG, "SPM Owner = 3D-BG\n", 0) \
+X(13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ, "SPM Owner = 3D-IRQ\n", 0) \
+X(14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE, "SPM Owner = NONE\n", 0) \
+X(15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG, "SPM Owner = TA-BG\n", 0) \
+X(16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ, "SPM Owner = TA-IRQ\n", 0) \
+X(17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x\n", 2) \
+X(18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x\n", 2) \
+X(19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x\n", 2) \
+X(20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x\n", 2) \
+X(21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER, "No deferred ZS Buffer provided\n", 0) \
+X(22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)\n", 1) \
+X(23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED, "No need to populate ZS Buffer (ID=0x%08x)\n", 1) \
+X(24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)\n", 1) \
+X(25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED, "No need to unpopulate ZS Buffer (ID=0x%08x)\n", 1) \
+X(26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST, "Send ZS-Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST, "Send ZS-Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)\n", 1) \
+X(31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)\n", 1) \
+X(32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none\n", 0) \
+X(33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked\n", 0) \
+X(34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow\n", 0) \
+X(35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW\n", 0) \
+X(36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running\n", 0) \
+X(37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided\n", 0) \
+X(38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed\n", 0) \
+X(39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)\n", 2) \
+X(40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag\n", 0) \
+X(41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x\n", 1) \
+X(42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)\n", 1) \
+\
+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 4) \
+X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_IDLE, "Sidekick idle (might be powered down). Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%8.8X\n", 3) \
+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d\n", 4) \
+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_OFF, "Sidekick ready to be powered down. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)\n", 2) \
+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d\n", 2) \
+X(11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init\n", 0) \
+X(12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.\n", 0) \
+X(14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.\n", 0) \
+X(15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X(16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d\n", 2) \
+X(17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37566, "Request power up due to BRN37566. Pow state int: 0x%X\n", 1) \
+X(18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 3) \
+X(19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%X\n", 1) \
+X(20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%X\n", 1) \
+X(21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz\n", 2) \
+X(24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%X, %d dusts powered.\n", 2) \
+X(25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.\n", 0) \
+X(26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u\n", 1) \
+X(27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X\n", 3) \
+X(28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x\n", 2) \
+X(29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x\n", 2) \
+X(30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = 0x%08x\n", 1) \
+X(31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = 0x%08x\n", 1) \
+X(32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = 0x%08x%08x\n", 2) \
+X(33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x\n", 1) \
+X(34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x\n", 2) \
+X(35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x\n", 2) \
+X(36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x\n", 1) \
+X(37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.\n", 0) \
+X(38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE, "Proactive DVFS: Invalid node passed to function.\n", 0) \
+X(39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u\n", 1) \
+X(40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: 0x%x\n", 1) \
+X(41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: 0x%x\n", 1) \
+X(42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.\n", 0) \
+X(43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d\n", 2) \
+X(44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042, "Allowed number of dusts is %d due to BRN59042.\n", 1) \
+X(45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+\
+X(1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW\n", 0) \
+X(4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.\n", 0) \
+X(5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: %08.8X \n", 2) \
+X(6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due to lockup: GLB(%d), PER-DM(0x%08X->0x%08X)\n", 3) \
+X(9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08X)\n", 4) \
+X(13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08X val:0x%08X)\n", 3) \
+X(14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08X\n", 2) \
+X(15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08X (st: %d), FWCtx 0x%08X @ %d\n", 6) \
+X(16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d\n", 2) \
+X(17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8X - local (0x%08.8X): %d, global (0x%08.8X): %d\n", 5) \
+X(18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8X, discard: %d - local (0x%08.8X): s%d?=c%d, global (0x%08.8X): s%d?=c%d\n", 8) \
+X(19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8X c%d\n", 2) \
+X(20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8X @ %d, RTD 0x%08x.\n", 3) \
+X(21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)\n", 2) \
+X(22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: %08.8X (TPC addr: %08X%08X, size: %d bytes)\n", 4) \
+X(23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08X\n", 2) \
+X(24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08X \n", 5) \
+X(25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered\n", 1) \
+X(26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x\n", 2) \
+X(27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction\n", 0) \
+X(28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: %08.8X. Need PR cleanup\n", 2) \
+X(31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU has locked up\n", 0) \
+X(32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR\n", 1) \
+X(33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08X\n", 2) \
+X(34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08X\n", 1) \
+X(35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out\n", 1) \
+X(36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x\n", 1) \
+X(37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08X\n", 2) \
+X(38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline\n", 0) \
+X(39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll\n", 0) \
+X(40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x\n", 2) \
+X(41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (loop:%d, poll failures: 0x%08X)\n", 2) \
+X(42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08X\n", 1) \
+X(43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08X)\n", 1) \
+X(44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).\n", 1) \
+X(45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08X%08X)\n", 2) \
+X(46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u\n", 3) \
+X(49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d\n", 3) \
+X(50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "Deadline counter for DM%u is HWRDeadline=%u\n", 2) \
+X(51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction\n", 1) \
+X(52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%X (ID=%d)\n", 2) \
+X(53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete\n", 1) \
+X(54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u\n", 4) \
+X(55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed\n", 1) \
+X(56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u\n", 2) \
+X(59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty\n", 1) \
+X(60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: %08x%08x, deadline: %08x%08x\n", 5) \
+X(61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)\n", 1) \
+X(62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)\n", 1) \
+X(63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction\n", 2) \
+X(64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes StreamStartOffset = %u)\n", 5) \
+X(65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)\n", 3) \
+X(67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance\n", 1) \
+\
+X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u\n", 2) \
+X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW\n", 1) \
+X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW\n", 1) \
+X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x\n", 2) \
+X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x\n", 1) \
+X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x\n", 1) \
+X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping\n", 1) \
+X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x\n", 1) \
+X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x\n", 1) \
+X(10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x\n", 1) \
+X(11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x\n", 2) \
+X(12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x\n", 1) \
+X(13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x\n", 2) \
+X(14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver\n", 1) \
+X(15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver\n", 0) \
+X(16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: %x value: %x \n", 2) \
+X(17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:%x\n", 2) \
+X(18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID %x is not allowed. The package [b:%u, n:%u] will be discarded\n", 3) \
+X(19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Custom Counters filter status %d\n", 1) \
+X(20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded\n", 2) \
+X(21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d\n", 2) \
+X(22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter %x is %x ?\n", 2) \
+X(23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset\n", 1) \
+X(24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD, "Encountered an invalid command (%d)\n", 1) \
+X(25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d \n", 1) \
+\
+X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u\n", 5) \
+X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u\n", 4) \
+X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer ID %u completion...\n", 1) \
+X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed\n", 3) \
+X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)\n", 3) \
+X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure\n", 1) \
+\
+X(1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \
+\
+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string\n", 15)
+
+
+/* Each symbolic name in the table above is assigned a 32-bit (ui32) value
+ * with the following bit layout:
+ *
+ *   0-11:  id number
+ *   12-15: group id number
+ *   16-19: number of parameters
+ *   20-27: unused
+ *   28-30: marker bits identifying an SF packet (otherwise the value is a regular int32)
+ *   31:    reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum-generated SF id list.
+ */
+#define RGXFW_LOG_IDMARKER (0x70000000)
+#define RGXFW_LOG_CREATESFID(a,b,e) (((a) | ((b) << 12) | ((e) << 16)) | RGXFW_LOG_IDMARKER)
+
+#define RGXFW_LOG_IDMASK (0xFFF00000)
+#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER)
+
+typedef enum RGXFW_LOG_SFids {
+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e),
+ RGXFW_LOG_SFIDLIST
+#undef X
+} RGXFW_LOG_SFids;
+
+/* Return the group id that the given (enum-generated) id belongs to */
+#define RGXFW_SF_GID(x) (((x)>>12) & 0xf)
+/* Returns how many arguments the SF (string format) for the given (enum-generated) id requires */
+#define RGXFW_SF_PARAMNUM(x) (((x)>>16) & 0xf)
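+
+/* Illustrative sketch only (guarded out, not part of this interface): a
+ * minimal example of how a host-side decoder might classify a 32-bit word
+ * read from the firmware trace buffer using the macros above. The function
+ * name and the use of printk here are hypothetical.
+ */
+#if 0
+static void RGXFWLogDescribeWord(IMG_UINT32 ui32Word)
+{
+	if (RGXFW_LOG_VALIDID(ui32Word))
+	{
+		/* SF packet header: the group id and the number of parameter
+		 * words that follow can be read directly from the id. */
+		printk(KERN_DEBUG "SF id 0x%08x: group %u, %u parameter(s)\n",
+		       ui32Word,
+		       RGXFW_SF_GID(ui32Word),
+		       RGXFW_SF_PARAMNUM(ui32Word));
+	}
+	else
+	{
+		/* Anything else is a regular 32-bit payload word. */
+		printk(KERN_DEBUG "payload word 0x%08x\n", ui32Word);
+	}
+}
+#endif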
+
+#endif /* RGX_FWIF_SF_H */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures shared by both host client
+ and host server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SHARED_H__)
+#define __RGX_FWIF_SHARED_H__
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "rgx_common.h"
+#include "devicemem_typedefs.h"
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned on this size.
+ */
+#define FW_BLOCK_SIZE 4096L
+
+/* Offset for BVNC struct from the end of the FW binary */
+#define FW_BVNC_BACKWARDS_OFFSET (FW_BLOCK_SIZE)
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKMIF_DEVICE_STATE_ZERO_FREELIST (0x1 << 0) /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKMIF_DEVICE_STATE_FTRACE_EN (0x1 << 1) /*!< Used to enable production of GPU FTrace from HWPerf events in the MISR */
+#define RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x1 << 2) /*!< Used to disable the Devices Watchdog logging */
+#define RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN (0x1 << 3) /*!< Used for validation to inject dust requests every TA/3D kick */
+#define RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN (0x1 << 4) /*!< Used to enable host-side-only HWPerf stream */
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+ (the Meta gcc aligns 64-bit variables to 64 bits; therefore, memory shared
+ between the host and Meta that contains 64-bit variables has to maintain
+ this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64)
+
+typedef struct _RGXFWIF_DEV_VIRTADDR_
+{
+ IMG_UINT32 ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct _RGXFWIF_DMA_ADDR_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr;
+ RGXFWIF_DEV_VIRTADDR pbyFWAddr;
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8 RGXFWIF_CCCB;
+
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RENDER_TARGET;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RAY_FRAME_DATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RPM_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RTA_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_WORKLOAD_DATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_DEADLINE_LIST_NODE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_WORKLOAD_LIST_NODE;
+
+/* FIXME PRGXFWIF_UFO_ADDR and RGXFWIF_UFO should move back into rgx_fwif_client.h */
+typedef struct _RGXFWIF_UFO_
+{
+ PRGXFWIF_UFO_ADDR puiAddrUFO;
+ IMG_UINT32 ui32Value;
+} RGXFWIF_UFO;
+
+
+/*!
+ Last reset reason for a context.
+*/
+typedef enum _RGXFWIF_CONTEXT_RESET_REASON_
+{
+ RGXFWIF_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */
+ RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */
+ RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */
+ RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */
+ RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */
+} RGXFWIF_CONTEXT_RESET_REASON;
+
+
+/*!
+ HWRTData state the render is in
+*/
+typedef enum
+{
+ RGXFWIF_RTDATA_STATE_NONE = 0,
+ RGXFWIF_RTDATA_STATE_KICKTA,
+ RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+ RGXFWIF_RTDATA_STATE_TAFINISHED,
+ RGXFWIF_RTDATA_STATE_KICK3D,
+ RGXFWIF_RTDATA_STATE_3DFINISHED,
+ RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+ RGXFWIF_RTDATA_STATE_HWR /*!< In case of HWR, we can't set the RTDATA state to NONE,
+ as this will cause any TA to become a first TA.
+ To ensure all related TAs are skipped, we use the HWR state */
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct _RGXFWIF_CLEANUP_CTL_
+{
+ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */
+ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCCB_CTL_
+{
+ IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+ IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */
+ IMG_UINT32 ui32DepOffset; /*!< Dependency offset */
+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+
+typedef enum
+{
+ RGXFW_LOCAL_FREELIST = 0,
+ RGXFW_GLOBAL_FREELIST = 1,
+#if defined(SUPPORT_MMU_FREELIST)
+ RGXFW_MMU_FREELIST = 2,
+#endif
+ RGXFW_MAX_FREELISTS
+} RGXFW_FREELIST_TYPE;
+
+typedef struct _RGXFWIF_RTA_CTL_
+{
+ IMG_UINT32 ui32RenderTargetIndex; //Render number
+ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA
+ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs
+ IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM
+ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices
+ RGXFWIF_DEV_VIRTADDR sNumRenders; //Array of number of occurred partial renders per render target
+ IMG_UINT16 ui16MaxRTs; //Number of render targets in the array
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+typedef struct _RGXFWIF_FREELIST_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr;
+ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;
+ IMG_UINT32 ui32CurrentStackTop;
+ IMG_UINT32 ui32MaxPages;
+ IMG_UINT32 ui32GrowPages;
+ IMG_UINT32 ui32CurrentPages;
+ IMG_UINT32 ui32AllocatedPageCount;
+ IMG_UINT32 ui32AllocatedMMUPageCount;
+ IMG_UINT32 ui32HWRCounter;
+ IMG_UINT32 ui32FreeListID;
+ IMG_BOOL bGrowPending;
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+typedef enum
+{
+ RGXFW_RPM_SHF_FREELIST = 0,
+ RGXFW_RPM_SHG_FREELIST = 1,
+} RGXFW_RPM_FREELIST_TYPE;
+
+#define RGXFW_MAX_RPM_FREELISTS (2)
+
+typedef struct _RGXFWIF_RPM_FREELIST_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListDevVAddr; /*!< device base address */
+ //IMG_DEV_VIRTADDR RGXFW_ALIGN sRPMPageListDevVAddr; /*!< device base address for RPM pages in-use */
+ IMG_UINT32 sSyncAddr; /*!< Free list sync object for OOM event */
+ IMG_UINT32 ui32MaxPages; /*!< maximum size */
+ IMG_UINT32 ui32GrowPages; /*!< grow size = maximum pages which may be added later */
+ IMG_UINT32 ui32CurrentPages; /*!< number of pages */
+ IMG_UINT32 ui32ReadOffset; /*!< head: where to read alloc'd pages */
+ IMG_UINT32 ui32WriteOffset; /*!< tail: where to write de-alloc'd pages */
+ IMG_BOOL bReadToggle; /*!< toggle bit for circular buffer */
+ IMG_BOOL bWriteToggle;
+ IMG_UINT32 ui32AllocatedPageCount; /*!< TODO: not sure yet if this is useful */
+ IMG_UINT32 ui32HWRCounter;
+ IMG_UINT32 ui32FreeListID; /*!< unique ID per device, e.g. rolling counter */
+ IMG_BOOL bGrowPending; /*!< FW is waiting for host to grow the freelist */
+} UNCACHED_ALIGN RGXFWIF_RPM_FREELIST;
+
+typedef struct _RGXFWIF_RAY_FRAME_DATA_
+{
+ /* state manager for shared state between vertex and ray processing */
+
+ /* TODO: not sure if this will be useful, link it here for now */
+ IMG_UINT32 sRPMFreeLists[RGXFW_MAX_RPM_FREELISTS];
+
+ IMG_BOOL bAbortOccurred;
+
+ /* cleanup state.
+ * Both the SHG and RTU must complete or discard any outstanding work
+ * which references this frame data.
+ */
+ RGXFWIF_CLEANUP_CTL sCleanupStateSHG;
+ RGXFWIF_CLEANUP_CTL sCleanupStateRTU;
+ IMG_UINT32 ui32CleanupStatus;
+#define HWFRAMEDATA_SHG_CLEAN (1 << 0)
+#define HWFRAMEDATA_RTU_CLEAN (1 << 1)
+
+} UNCACHED_ALIGN RGXFWIF_RAY_FRAME_DATA;
+
+
+typedef struct _RGXFWIF_RENDER_TARGET_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap Data Store */
+ IMG_BOOL bTACachesNeedZeroing; /*!< Whether RTC and TPC caches (on mem) need to be zeroed on next first TA kick */
+
+} UNCACHED_ALIGN RGXFWIF_RENDER_TARGET;
+
+
+typedef struct _RGXFWIF_HWRTDATA_
+{
+ RGXFWIF_RTDATA_STATE eState;
+
+ IMG_UINT32 ui32NumPartialRenders; /*!< Number of partial renders. Used to setup ZLS bits correctly */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
+
+ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase;
+ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase;
+
+#if defined(SUPPORT_VFP)
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sVFPPageTableAddr;
+#endif
+ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer;
+ IMG_UINT32 ui32PMMListStackPointer;
+
+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS];
+ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+
+ PRGXFWIF_RENDER_TARGET psParentRenderTarget;
+
+ RGXFWIF_CLEANUP_CTL sTACleanupState;
+ RGXFWIF_CLEANUP_CTL s3DCleanupState;
+ IMG_UINT32 ui32CleanupStatus;
+#define HWRTDATA_TA_CLEAN (1 << 0)
+#define HWRTDATA_3D_CLEAN (1 << 1)
+
+ PRGXFWIF_RTA_CTL psRTACtl;
+
+ IMG_UINT32 bHasLastTA;
+ IMG_BOOL bPartialRendered;
+
+ IMG_UINT32 ui32PPPScreen;
+ IMG_UINT32 ui32PPPGridOffset;
+ IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl;
+ IMG_UINT32 ui32TPCStride;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32MTileStride;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ISPMergeUpperY;
+ IMG_UINT32 ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ISPMergeScaleY;
+ IMG_BOOL bDisableTileReordering;
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+typedef enum
+{
+ RGXFWIF_ZSBUFFER_UNBACKED = 0,
+ RGXFWIF_ZSBUFFER_BACKED,
+ RGXFWIF_ZSBUFFER_BACKING_PENDING,
+ RGXFWIF_ZSBUFFER_UNBACKING_PENDING,
+}RGXFWIF_ZSBUFFER_STATE;
+
+typedef struct _RGXFWIF_ZSBUFFER_
+{
+ IMG_UINT32 ui32ZSBufferID; /*!< Buffer ID*/
+ IMG_BOOL bOnDemand; /*!< Needs On-demand ZS Buffer allocation */
+ RGXFWIF_ZSBUFFER_STATE eState; /*!< Z/S-Buffer state */
+ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */
+} UNCACHED_ALIGN RGXFWIF_FWZSBUFFER;
+
+/* Number of BIF tiling configurations / heaps */
+#define RGXFWIF_NUM_BIF_TILING_CONFIGS 4
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+/* WARNING: RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX may only ever be increased and
+ must always equal (N * sizeof(IMG_UINT32) - 1) */
+#define RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX 7
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
+ following define must be incremented by 1 so the compatibility logic can
+ detect that the layout has changed */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 2
+
+typedef struct _RGXFWIF_COMPCHECKS_BVNC_
+{
+ IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must remain the first member of this structure */
+ IMG_UINT32 ui32VLenMax;
+ IMG_UINT64 RGXFW_ALIGN ui64BNC;
+ IMG_CHAR aszV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX + 1];
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+ RGXFWIF_COMPCHECKS_BVNC name = { \
+ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+ RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX, \
+ 0, \
+ { 0 }, \
+ }
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+ do { \
+ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+ (name).ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX; \
+ (name).ui64BNC = 0; \
+ (name).aszV[0] = 0; \
+ } while (0)
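+
+/* Illustrative usage sketch (guarded out, not part of the original interface):
+ * both forms initialise the layout version and maximum V-string length so that
+ * host and firmware can later verify they agree on the structure layout. The
+ * variable names below are hypothetical and the statements would live inside a
+ * function.
+ */
+#if 0
+	/* Declare and initialise in one step */
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+
+	/* Initialise an already-declared instance */
+	RGXFWIF_COMPCHECKS_BVNC sFWBVNC;
+	RGXFWIF_COMPCHECKS_BVNC_INIT(sFWBVNC);
+#endif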
+
+typedef struct _RGXFWIF_COMPCHECKS_
+{
+ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */
+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */
+ IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the MIPS/META version */
+ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */
+ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */
+ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */
+ IMG_BOOL bUpdated; /*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+ ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+ (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
+#define RESERVED_CCB_SPACE (sizeof(IMG_UINT32))
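+
+/* Worked example (illustrative, arbitrary values): for a 16 KB cCCB
+ * (CCBSize = 0x4000), a write offset of 0x3F00 and a read offset of 0x0100,
+ *
+ *   GET_CCB_SPACE(0x3F00, 0x0100, 0x4000)
+ *     = ((0x0100 - 0x3F00) + 0x3FFF) & 0x3FFF = 0x01FF
+ *
+ * i.e. 511 bytes are free: the free space wraps around the end of the buffer
+ * and one byte is held back so a full buffer can be told apart from an empty
+ * one. After writing a 0x100-byte packet, UPDATE_CCB_OFFSET(WOff, 0x100, 0x4000)
+ * advances WOff from 0x3F00 to 0x0000, wrapping modulo the buffer size.
+ */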
+
+
+/* Defines relating to the per-context CCBs */
+
+/* This size is used when a client CCB is expected to consume negligible space
+ * (e.g. a few hundred bytes to a few KB - less than a page). In such cases,
+ * instead of allocating a CCB of only a few KB, we allocate at least this much
+ * so the size does not need revisiting in the future. */
+#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */
+
+/* cCCB sizes per DM context */
+#if defined(EMULATOR)
+
+/* On the emulator platform, the sizes are kept at 64 KB for all contexts, as
+ * the cCCBs are expected to be used almost up to their full size */
+
+#define RGX_TQ3D_CCB_SIZE_LOG2 16 /* 64K */
+#define RGX_TQ2D_CCB_SIZE_LOG2 16
+#define RGX_CDM_CCB_SIZE_LOG2 16
+#define RGX_TA_CCB_SIZE_LOG2 16
+#define RGX_3D_CCB_SIZE_LOG2 16
+#define RGX_KICKSYNC_CCB_SIZE_LOG2 16
+#define RGX_RTU_CCB_SIZE_LOG2 16
+
+#else /* defined (EMULATOR) */
+
+/* The following figures are obtained by observing the cCCB usage levels of various
+ * GL/CL benchmark applications under different platforms and configurations, such
+ * that the high watermarks (almost) never hit the full size of the cCCB */
+#define RGX_TQ3D_CCB_SIZE_LOG2 14 /* 16K */
+#define RGX_TQ2D_CCB_SIZE_LOG2 14 /* 16K */
+#define RGX_CDM_CCB_SIZE_LOG2 MIN_SAFE_CCB_SIZE_LOG2 /* The compute cCCB was found to consume only a few hundred bytes on a compute benchmark */
+#define RGX_TA_CCB_SIZE_LOG2 15 /* 32K */
+#define RGX_3D_CCB_SIZE_LOG2 16 /* 64K */
+#define RGX_KICKSYNC_CCB_SIZE_LOG2 MIN_SAFE_CCB_SIZE_LOG2 /* KickSync is expected to consume little space, hence the minimum size */
+#define RGX_RTU_CCB_SIZE_LOG2 15
+
+#endif /* defined (EMULATOR) */
+
+/*!
+ ******************************************************************************
+ * Client CCB commands for RGX
+ *****************************************************************************/
+
+#define RGX_CCB_TYPE_TASK (1 << 31)
+#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1))
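+
+/* Worked example (illustrative): with RGXFWIF_FWALLOC_ALIGN equal to
+ * sizeof(IMG_UINT64) == 8, RGX_CCB_FWALLOC_ALIGN(20) rounds a 20-byte
+ * command up to (20 + 7) & ~7 = 24 bytes, keeping every cCCB allocation
+ * 64-bit aligned as required for memory shared with the Meta firmware.
+ */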
+
+typedef enum _RGXFWIF_CCB_CMD_TYPE_
+{
+ RGXFWIF_CCB_CMD_TYPE_TA = 201 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_3D = 202 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_CDM = 203 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_3D = 204 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_2D = 205 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_3D_PR = 206 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_NULL = 207 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_SHG = 208 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_RTU = 209 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_RTU_FC = 210 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP = 211 | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_TDM = 212 | RGX_CCB_TYPE_TASK,
+
+/* Leave a gap between CCB specific commands and generic commands */
+ RGXFWIF_CCB_CMD_TYPE_FENCE = 213,
+ RGXFWIF_CCB_CMD_TYPE_UPDATE = 214,
+ RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE = 215,
+ RGXFWIF_CCB_CMD_TYPE_FENCE_PR = 216,
+ RGXFWIF_CCB_CMD_TYPE_PRIORITY = 217,
+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+ padding code with the CCB wrap upsets the FW if we don't have the task type
+ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+*/
+ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP = 218,
+ RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE = 219,
+ RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE = 220,
+
+ RGXFWIF_CCB_CMD_TYPE_PADDING = 221,
+} RGXFWIF_CCB_CMD_TYPE;
+
+typedef struct _RGXFWIF_WORKLOAD_DATA_
+{
+ /* Workload characteristics data*/
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadCharacteristics;
+ /* Deadline for the workload */
+ IMG_UINT64 RGXFW_ALIGN ui64DeadlineInus;
+ /* Bool for whether the workload was completed */
+ IMG_BOOL bComplete;
+ /* Predicted time taken to do the work in cycles */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction;
+ /* The actual time taken in cycles */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken;
+ /* The memory descriptor for this workload */
+ IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc;
+ /* Memory descriptor to be able to chain workload data */
+ IMG_UINT64 RGXFW_ALIGN ui64NextNodeMemdesc;
+ /* Reference to Host side data */
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadHostData;
+ /* Reference to Specific Hash table */
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadMatchingData;
+ /* The following are for the memory management of the PDVFS workload
+ * tree in the firmware */
+ PRGXFWIF_DEADLINE_LIST_NODE RGXFW_ALIGN sDeadlineNodeFWAddress;
+ PRGXFWIF_WORKLOAD_LIST_NODE RGXFW_ALIGN sWorkloadNodeFWAddress;
+ IMG_UINT64 RGXFW_ALIGN ui64DeadlineNodeMemDesc;
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadNodeMemDesc;
+} RGXFWIF_WORKLOAD_DATA;
+
+typedef struct _RGXFWIF_WORKEST_KICK_DATA_
+{
+ /* Index for the KM Workload estimation return data array */
+ IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex;
+ /* Deadline for the workload */
+ IMG_UINT64 RGXFW_ALIGN ui64DeadlineInus;
+ /* Predicted time taken to do the work in cycles */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+typedef struct _RGXFWIF_WORKLOAD_LIST_NODE_ RGXFWIF_WORKLOAD_LIST_NODE;
+typedef struct _RGXFWIF_DEADLINE_LIST_NODE_ RGXFWIF_DEADLINE_LIST_NODE;
+
+struct _RGXFWIF_WORKLOAD_LIST_NODE_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64Cycles;
+ IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc;
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc;
+ IMG_BOOL bReleased;
+ RGXFWIF_WORKLOAD_LIST_NODE *psNextNode;
+};
+
+struct _RGXFWIF_DEADLINE_LIST_NODE_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64DeadlineInus;
+ RGXFWIF_WORKLOAD_LIST_NODE *psWorkloadList;
+ IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc;
+ IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc;
+ IMG_BOOL bReleased;
+ RGXFWIF_DEADLINE_LIST_NODE *psNextNode;
+};
+typedef struct _RGXFWIF_CCB_CMD_HEADER_
+{
+ RGXFWIF_CCB_CMD_TYPE eCmdType;
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+ PRGXFWIF_WORKLOAD_DATA RGXFW_ALIGN sWorkloadDataFWAddr;
+ RGXFWIF_WORKEST_KICK_DATA sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+} RGXFWIF_CCB_CMD_HEADER;
+
+typedef enum _RGXFWIF_REG_CFG_TYPE_
+{
+ RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */
+ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */
+ RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */
+ RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */
+ RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */
+ RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */
+ RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */
+ RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */
+} RGXFWIF_REG_CFG_TYPE;
+
+typedef struct _RGXFWIF_REG_CFG_REC_
+{
+ IMG_UINT64 ui64Addr;
+ IMG_UINT64 ui64Mask;
+ IMG_UINT64 ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+
+typedef struct _RGXFWIF_TIME_CORR_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+ IMG_UINT32 ui32CoreClockSpeed;
+
+ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+ * where the deltas are relative to the timestamps above:
+ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+ IMG_UINT32 ui32CRDeltaToOSDeltaKNs;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* These macros are used to help converting FW timestamps to the Host time domain.
+ * On the FW the RGX_CR_TIMER counter is used to keep track of the time;
+ * it increments by 1 every 256 GPU clock ticks, so the general formula
+ * to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ * otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *              deltaCR * 256                                 256 * scale
+ *   deltaOS = --------------- * scale = deltaCR * K   [ K = --------------- ]
+ *              GPUclockspeed                                 GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to
+ * the base OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and
+ * periodic frequency calibration (executed every few seconds if the FW is
+ * doing some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+#define RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(clockfreq, remainder) \
+ OSDivide64((256000000ULL << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT), \
+ ((clockfreq) + 500) / 1000, \
+ &(remainder))
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+ ( ((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+#define RGXFWIF_GET_DELTA_OSTIME_US(deltacr, clockfreq, remainder) \
+ OSDivide64r64((deltacr) * 256000, ((clockfreq) + 500) / 1000, &(remainder))
+
+/* Use this macro to get a more realistic GPU core clock speed than
+ * the one given by the upper layers (used when doing GPU frequency
+ * calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+ OSDivide64((deltacr_us) * 256000000, (deltaos_us), &(remainder))
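+
+/* Worked example (illustrative): for a 512 MHz core clock the conversion
+ * factor computed by RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS is
+ *
+ *   K = (256000000 << 20) / 512000 = 500 << 20
+ *
+ * i.e. one RGX_CR_TIMER tick corresponds to 500 ns. A CR delta of 1000 ticks
+ * then converts to
+ *
+ *   RGXFWIF_GET_DELTA_OSTIME_NS(1000, K) = (1000 * (500 << 20)) >> 20
+ *                                        = 500000 ns (500 us).
+ */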
+
+/*
+ The maximum configurable size via RGX_FW_HEAP_SHIFT is
+ 32MiB (1<<25) and the minimum is 4MiB (1<<22); the
+ default firmware heap size is set to the maximum of 32MiB.
+*/
+#if (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25)
+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]"
+#endif
+
+#endif /* __RGX_FWIF_SHARED_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware signature checks
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by srvinit and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SIG_H__)
+#define __RGX_FWIF_SIG_H__
+
+#include "rgxdefs_km.h"
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+
+#if defined(PDUMP) && defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__)
+
+#define SIG_REG_TA_MAX_COUNT (12)
+static RGXFW_REGISTER_LIST asTASigRegList[SIG_REG_TA_MAX_COUNT];
+static IMG_UINT32 gui32TASigRegCount = 0;
+
+#define SIG_REG_3D_MAX_COUNT (6)
+static RGXFW_REGISTER_LIST as3DSigRegList[SIG_REG_3D_MAX_COUNT];
+static IMG_UINT32 gui323DSigRegCount = 0;
+
+#else
+
+/* List of TA signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asTASigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+ {RGX_CR_USC_UVB_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+#else
+ {RGX_CR_USC_UVS0_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS1_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS2_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS3_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS4_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS5_CHECKSUM, 0, 0, 0},
+#endif
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH)
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+ {RGX_CR_PPP_CLIP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+#else
+ {RGX_CR_PPP, 0, 0, 0},
+#endif
+ {RGX_CR_TE_CHECKSUM, 0, 0, 0},
+#else
+ {RGX_CR_PPP_SIGNATURE, 0, 0, 0},
+ {RGX_CR_TE_SIGNATURE, 0, 0, 0},
+#endif
+ {RGX_CR_VCE_CHECKSUM, 0, 0, 0},
+#if !defined(RGX_FEATURE_PDS_PER_DUST)
+ {RGX_CR_PDS_DOUTM_STM_SIGNATURE, 0, 0, 0},
+#endif
+};
+
+
+/* List of 3D signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST as3DSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ {RGX_CR_ISP_PDS_CHECKSUM, 0, 0, 0},
+ {RGX_CR_ISP_TPF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_TFPU_PLANE0_CHECKSUM, 0, 0, 0},
+ {RGX_CR_TFPU_PLANE1_CHECKSUM, 0, 0, 0},
+ {RGX_CR_PBE_CHECKSUM, 0, 0, 0},
+ {RGX_CR_IFPU_ISP_CHECKSUM, 0, 0, 0},
+#else
+ {RGX_CR_ISP_PDS_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+ {RGX_CR_ISP_TPF_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+ {RGX_CR_TFPU_PLANE0_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+ {RGX_CR_TFPU_PLANE1_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+ {RGX_CR_PBE_CHECKSUM, RGX_CR_PBE_INDIRECT, 0, RGX_FEATURE_NUM_CLUSTERS-1},
+ {RGX_CR_IFPU_ISP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+#endif
+};
+#endif
+
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+/* List of RTU signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asRTUSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+ {DPX_CR_RS_PDS_RR_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC0_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC1_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC2_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC3_CHECKSUM, 0, 0, 0},
+ {DPX_CR_RQ_USC_DEBUG, 0, 0, 0},
+};
+
+/* List of SHG signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asSHGSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+ {RGX_CR_SHF_SHG_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHF_VERTEX_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHF_VARY_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_RPM_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHG_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHG_FE_BE_CHECKSUM, 0, 0, 0},
+};
+#endif /* RGX_FEATURE_RAY_TRACING || __KERNEL__ */
+
+#endif /* __RGX_FWIF_SIG_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_sig.h)
+******************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX heap definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_HEAPS_H__)
+#define __RGX_HEAPS_H__
+
+#include "km/rgxdefs_km.h"
+#include "log2.h"
+#include "pvr_debug.h"
+
+/* RGX Heap IDs, note: not all heaps are available to clients */
+/* N.B. The old heap identifiers are deprecated now that the old memory
+ management has been deprecated. New heap identifiers should be suitably renamed */
+#define RGX_UNDEFINED_HEAP_ID (~0LU) /*!< RGX Undefined Heap ID */
+#define RGX_GENERAL_SVM_HEAP_ID 0 /*!< RGX General SVM (shared virtual memory) Heap ID */
+#define RGX_GENERAL_HEAP_ID 1 /*!< RGX General Heap ID */
+#define RGX_GENERAL_NON4K_HEAP_ID 2 /*!< RGX General non-4K Heap ID */
+#define RGX_RGNHDR_BRN_63142__ID 3 /*!< RGX RgnHdr BRN63142 Heap ID */
+#define RGX_PDSCODEDATA_HEAP_ID 4 /*!< RGX PDS Code/Data Heap ID */
+#define RGX_USCCODE_HEAP_ID 5 /*!< RGX USC Code Heap ID */
+#define RGX_FIRMWARE_HEAP_ID 6 /*!< RGX Firmware Heap ID */
+#define RGX_TQ3DPARAMETERS_HEAP_ID 7 /*!< RGX TQ 3D Parameters Heap ID */
+#define RGX_BIF_TILING_HEAP_1_ID 8 /*!< RGX BIF Tiling Heap 1 ID */
+#define RGX_BIF_TILING_HEAP_2_ID 9 /*!< RGX BIF Tiling Heap 2 ID */
+#define RGX_BIF_TILING_HEAP_3_ID 10 /*!< RGX BIF Tiling Heap 3 ID */
+#define RGX_BIF_TILING_HEAP_4_ID 11 /*!< RGX BIF Tiling Heap 4 ID */
+#define RGX_HWBRN37200_HEAP_ID 12 /*!< RGX HWBRN37200 */
+#define RGX_DOPPLER_HEAP_ID 13 /*!< Doppler Heap ID */
+#define RGX_DOPPLER_OVERFLOW_HEAP_ID 14 /*!< Doppler Overflow Heap ID */
+#define RGX_SERVICES_SIGNALS_HEAP_ID 15 /*!< Services Signals Heap ID */
+#define RGX_SIGNALS_HEAP_ID 16 /*!< Signals Heap ID */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID 17
+#define RGX_GUEST_FIRMWARE_HEAP_ID 18 /*!< Additional OSIDs Firmware */
+#define RGX_MAX_HEAP_ID (RGX_GUEST_FIRMWARE_HEAP_ID + RGXFW_NUM_OS) /*!< Max Valid Heap ID */
+
+/*
+ Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */
+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_BIF_TILING_HEAP_1_IDENT "BIF Tiling Heap 1" /*!< RGX BIF Tiling Heap 1 identifier */
+#define RGX_BIF_TILING_HEAP_2_IDENT "BIF Tiling Heap 2" /*!< RGX BIF Tiling Heap 2 identifier */
+#define RGX_BIF_TILING_HEAP_3_IDENT "BIF Tiling Heap 3" /*!< RGX BIF Tiling Heap 3 identifier */
+#define RGX_BIF_TILING_HEAP_4_IDENT "BIF Tiling Heap 4" /*!< RGX BIF Tiling Heap 4 identifier */
+#define RGX_DOPPLER_HEAP_IDENT "Doppler" /*!< Doppler Heap Identifier */
+#define RGX_DOPPLER_OVERFLOW_HEAP_IDENT "Doppler Overflow" /*!< Doppler Overflow Heap Identifier */
+#define RGX_SERVICES_SIGNALS_HEAP_IDENT "Services Signals" /*!< Services Signals Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */
+#define RGX_VISTEST_HEAP_IDENT "VisTest" /*!< VisTest heap */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs"
+
+/* BIF tiling heaps have specific buffer requirements based on their XStride
+ * configuration. This is detailed in the BIF tiling documentation and ensures
+ * that the bits swapped by the BIF tiling algorithm do not result in addresses
+ * outside the allocated buffer. The representation here reflects the diagram
+ * in the BIF tiling documentation for tiling mode '0'.
+ *
+ * For tiling mode '1', the overall tile size does not change, width increases
+ * to 2^9 but the height drops to 2^3.
+ * This means the RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE macro can be
+ * used for both modes.
+ *
+ * Previous TILING_HEAP_STRIDE macros are retired in preference to storing an
+ * alignment to stride factor, derived from the tiling mode, with the tiling
+ * heap configuration data.
+ *
+ * XStride is defined for a platform in sysconfig.h, but the resulting
+ * alignment and stride factor can be queried through the
+ * PVRSRVGetHeapLog2ImportAlignmentAndTilingStrideFactor() API.
+ * For reference:
+ * Log2BufferStride = Log2Alignment - Log2AlignmentToTilingStrideFactor
+ */
+#define RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(X) (4+X+1+8)
+#define RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE (4)
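+
+/* Worked example (illustrative; the XStride value is an assumption):
+ * for XStride = 3,
+ *   Log2Alignment    = 4 + 3 + 1 + 8 = 16   ->  64KB aligned imports
+ * and, if the alignment-to-stride factor equals the base value of 4,
+ *   Log2BufferStride = 16 - 4 = 12          ->  4KB buffer stride
+ */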
+
+/*
+ * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
+ */
+#define RGX_HEAP_4KB_PAGE_SHIFT (12)
+#define RGX_HEAP_16KB_PAGE_SHIFT (14)
+#define RGX_HEAP_64KB_PAGE_SHIFT (16)
+#define RGX_HEAP_256KB_PAGE_SHIFT (18)
+#define RGX_HEAP_1MB_PAGE_SHIFT (20)
+#define RGX_HEAP_2MB_PAGE_SHIFT (21)
+
+/* Takes a log2 page size parameter and calculates a suitable page size
+ * for the RGX heaps. Returns 0 if the parameter is invalid. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+ IMG_BOOL bFound = IMG_FALSE;
+
+ /* The OS page shift must be non-zero, at least RGX_HEAP_4KB_PAGE_SHIFT
+ * and at most RGX_HEAP_2MB_PAGE_SHIFT */
+ if ( uiLog2PageSize == 0 ||
+ (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+ (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Provided incompatible log2 page size %u",
+ __FUNCTION__,
+ uiLog2PageSize));
+ PVR_ASSERT(0);
+ return 0;
+ }
+
+ do
+ {
+ switch (uiLog2PageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ /* All good, RGX page size equals given page size
+ * => use it as default for heaps */
+ bFound = IMG_TRUE;
+ break;
+ default:
+ /* We have to fall back to a smaller device
+ * page size than given page size because there
+ * is no exact match for any supported size. */
+ uiLog2PageSize -= 1;
+ break;
+ }
+ } while (!bFound);
+
+ return uiLog2PageSize;
+}
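+
+/* Usage sketch (illustrative): an OS page shift with no exact RGX match
+ * falls back to the next smaller supported shift, e.g.
+ *   RGXHeapDerivePageSize(13) == 12    (8KB OS pages  -> 4KB heap pages)
+ *   RGXHeapDerivePageSize(16) == 16    (64KB pages are supported directly)
+ */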
+
+
+#endif /* __RGX_HEAPS_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX HWPerf Types and Defines Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common data types definitions for hardware performance API
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_KM_H_
+#define RGX_HWPERF_KM_H_
+
+/*
+ * This header file holds the HWPerf related macros and types needed by the
+ * code in the Kernel Mode (KM) server/driver module and its content is
+ * intended to be suitable for distribution under a public software license.
+ * The definitions within are common and may be used in user-mode, kernel-mode
+ * and firmware compilation units.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define RGX_HWPERF_V2_FORMAT 2
+
+#include "rgx_common.h"
+#include "pvrsrv_tlcommon.h"
+#include <powervr/sync_external.h>
+
+/* HWPerf host buffer size constraints in KBs */
+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT (128U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MAX (1024U)
+
+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */
+#define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX((RGX_FEATURE_NUM_CLUSTERS>>1),1)
+
+/*! The number of indirectly addressable USC blocks in the GPU */
+#define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS)
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map. */
+ #define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */
+ #define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS)
+ #define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */
+ #define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */
+ #define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */
+ #define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 /* No doppler unit */
+ #define RGX_HWPERF_MAX_OTHER_INDIRECT_BLKS 0 /* some indirect blocks have fixed instances in a core */
+
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+
+ #if defined(RGX_FEATURE_RAY_TRACING)
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map. */
+ #define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 6 /* TORNADO, TA, BF, BT, RT, SH */
+ #define RGX_HWPERF_DOPPLER_BX_TU_BLKS 4 /* Doppler unit unconditionally has 4 instances of BX_TU */
+ #define RGX_HWPERF_MAX_OTHER_INDIRECT_BLKS RGX_HWPERF_DOPPLER_BX_TU_BLKS
+ #else /* !defined(RGX_FEATURE_RAY_TRACING) */
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map. */
+ #define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */
+ #define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 /* No doppler unit */
+ #define RGX_HWPERF_MAX_OTHER_INDIRECT_BLKS 0
+ #endif /* defined(RGX_FEATURE_RAY_TRACING) */
+
+ #define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS)
+ #define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */
+ #define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
+ #define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+#else /* if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) */
+
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map. */
+ #define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */
+ #define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* There is no PHANTOM in Rogue1; the name is only kept for consistency with later series (Rogue XT and Rogue XT+) */
+ #define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0
+ #define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
+ #define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+ #define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0 /* No doppler unit */
+ #define RGX_HWPERF_MAX_OTHER_INDIRECT_BLKS 0
+
+#endif
+
+/*! The number of indirect addressable layout blocks in the GPU with performance counters */
+#define RGX_HWPERF_MAX_INDIRECT_ADDR_BLKS (RGX_HWPERF_PHANTOM_DUST_BLKS * RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST +\
+ RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS * RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER +\
+ RGX_HWPERF_PHANTOM_NONDUST_BLKS * RGX_HWPERF_INDIRECT_BY_PHANTOM +\
+ RGX_HWPERF_MAX_OTHER_INDIRECT_BLKS)
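+
+/* Worked example (illustrative; a 6-cluster S7 configuration with 2 phantoms
+ * is an assumption):
+ *   2 dust blocks    * 3 (clusters/2) +
+ *   2 cluster blocks * 6 (clusters)   +
+ *   1 phantom block  * 2 (phantoms)   + 0  =  20 indirect blocks
+ */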
+
+/*! The number of custom non-mux counter blocks supported */
+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5
+
+/*! The number of counters supported in each non-mux counter block */
+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities
+ * at 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/*! Type used to encode the event that generated the HW performance packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool source
+ * needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will also need
+ * updating when adding new types.
+ */
+typedef enum
+{
+ RGX_HWPERF_INVALID = 0x00,
+
+ /* FW types 0x01..0x06 */
+ RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE = 0x01,
+
+ RGX_HWPERF_FW_BGSTART = 0x01,
+ RGX_HWPERF_FW_BGEND = 0x02,
+ RGX_HWPERF_FW_IRQSTART = 0x03,
+
+ RGX_HWPERF_FW_IRQEND = 0x04,
+ RGX_HWPERF_FW_DBGSTART = 0x05,
+ RGX_HWPERF_FW_DBGEND = 0x06,
+
+ RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE = 0x06,
+
+ /* HW types 0x07..0x19 */
+ RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE = 0x07,
+
+ RGX_HWPERF_HW_PMOOM_TAPAUSE = 0x07,
+ RGX_HWPERF_HW_TAKICK = 0x08,
+/* RGX_HWPERF_HW_PMOOM_TAPAUSE = 0x07, */
+/* RGX_HWPERF_HW_PMOOM_TARESUME = 0x19, */
+ RGX_HWPERF_HW_TAFINISHED = 0x09,
+ RGX_HWPERF_HW_3DTQKICK = 0x0A,
+/* RGX_HWPERF_HW_3DTQFINISHED = 0x17, */
+/* RGX_HWPERF_HW_3DSPMKICK = 0x11, */
+/* RGX_HWPERF_HW_3DSPMFINISHED = 0x18, */
+ RGX_HWPERF_HW_3DKICK = 0x0B,
+ RGX_HWPERF_HW_3DFINISHED = 0x0C,
+ RGX_HWPERF_HW_CDMKICK = 0x0D,
+ RGX_HWPERF_HW_CDMFINISHED = 0x0E,
+ RGX_HWPERF_HW_TLAKICK = 0x0F,
+ RGX_HWPERF_HW_TLAFINISHED = 0x10,
+ RGX_HWPERF_HW_3DSPMKICK = 0x11,
+ RGX_HWPERF_HW_PERIODIC = 0x12,
+ RGX_HWPERF_HW_RTUKICK = 0x13,
+ RGX_HWPERF_HW_RTUFINISHED = 0x14,
+ RGX_HWPERF_HW_SHGKICK = 0x15,
+ RGX_HWPERF_HW_SHGFINISHED = 0x16,
+ RGX_HWPERF_HW_3DTQFINISHED = 0x17,
+ RGX_HWPERF_HW_3DSPMFINISHED = 0x18,
+ RGX_HWPERF_HW_PMOOM_TARESUME = 0x19,
+
+ /* HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
+ RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE = 0x19,
+
+ /* other types 0x1A..0x1F */
+ RGX_HWPERF_CLKS_CHG = 0x1A,
+ RGX_HWPERF_GPU_STATE_CHG = 0x1B,
+
+ /* power types 0x20..0x27 */
+ RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE = 0x20,
+ RGX_HWPERF_PWR_EST_REQUEST = 0x20,
+ RGX_HWPERF_PWR_EST_READY = 0x21,
+ RGX_HWPERF_PWR_EST_RESULT = 0x22,
+ RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE = 0x22,
+
+ RGX_HWPERF_PWR_CHG = 0x23,
+
+ /* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
+ RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE = 0x28,
+
+ RGX_HWPERF_HW_TDMKICK = 0x28,
+ RGX_HWPERF_HW_TDMFINISHED = 0x29,
+
+ RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE = 0x29,
+
+ /* context switch types 0x30..0x31 */
+ RGX_HWPERF_CSW_START = 0x30,
+ RGX_HWPERF_CSW_FINISHED = 0x31,
+
+ /* firmware misc 0x38..0x39 */
+ RGX_HWPERF_UFO = 0x38,
+ RGX_HWPERF_FWACT = 0x39,
+
+ /* last */
+ RGX_HWPERF_LAST_TYPE,
+
+ /* This enumeration must have a value that is a power of two as it is
+ * used in masks and a filter bit field (currently 64 bits long).
+ */
+ RGX_HWPERF_MAX_TYPE = 0x40
+} RGX_HWPERF_EVENT_TYPE;
+
+/* Macro used to check if an event type ID is present in the known set of hardware type events */
+#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \
+ ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE))
+
+#define HWPERF_PACKET_IS_FW_TYPE(_etype) \
+ ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \
+ (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE)
+
+typedef enum {
+ RGX_HWPERF_HOST_INVALID = 0x00,
+ RGX_HWPERF_HOST_ENQ = 0x01,
+ RGX_HWPERF_HOST_UFO = 0x02,
+ RGX_HWPERF_HOST_ALLOC = 0x03,
+ RGX_HWPERF_HOST_CLK_SYNC = 0x04,
+ RGX_HWPERF_HOST_FREE = 0x05,
+
+ /* last */
+ RGX_HWPERF_HOST_LAST_TYPE,
+
+ /* event used as a control layer for HWPerf */
+ RGX_HWPERF_HOST_CTRL = 0x1f,
+
+ /* This enumeration must have a value that is a power of two as it is
+ * used in masks and a filter bit field (currently 32 bits long).
+ */
+ RGX_HWPERF_HOST_MAX_TYPE = 0x20
+} RGX_HWPERF_HOST_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
+
+/******************************************************************************
+ * Packet Format Version 2 Types
+ *****************************************************************************/
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG 0x48575032
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG 0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG 0x48575042
+
+#define HWPERF_PACKET_ISVALID(_ptr) (((_ptr) == HWPERF_PACKET_V2_SIG) || ((_ptr) == HWPERF_PACKET_V2A_SIG)|| ((_ptr) == HWPERF_PACKET_V2B_SIG))
+
+/*! This structure defines version 2 of the packet format which is
+ * based around a header and a variable length data payload structure.
+ * The address of the next packet can be found by adding the ui16Size field
+ * in the header to the current packet address.
+ * Producers of packets must always ensure the size field is a multiple of 8
+ * as packets must start on an 8-byte granular address.
+ */
+typedef struct
+{
+ /* HEADER - packet header fields common to all packet types */
+ IMG_UINT32 ui32Sig; /*!< Always one of the HWPERF_PACKET_V2*_SIG values */
+
+ IMG_UINT32 ui32Size; /*!< Overall packet size in bytes, includes
+ * header and payload. Size is a 16-bit field
+ * stored in the 16 LSb. 16 MSb reserved.
+ * Use RGX_HWPERF_MAKE_SIZE_* and RGX_HWPERF_GET_SIZE
+ * macros to set/get, never write directly. */
+
+ IMG_UINT32 eTypeId; /*!< Fields layout as shown:
+ * |<--------------------------32-bits----------------------------->|
+ * |<---8---->|<----5----->|<----3----->|<---1---->|<------15------>|
+ * | OSID | RESERVED | STREAM-ID | META TID | EventType |
+ *
+ * NOTE: 3 bits (8 OSIDs) would be sufficient for the OSID field above;
+ * if more bits are needed in future, the 5 MS bits of that field could
+ * be borrowed.
+ *
+ * Use RGX_HWPERF_MAKE_TYPEID and RGX_HWPERF_GET_*
+ * macros to set/get, never write directly. */
+
+ IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */
+ IMG_UINT64 ui64Timestamp; /*!< Depending on the side that packet originated
+ * may be either CPU timestamp or value of RGX_CR_TIMER
+ * at event. */
+
+ /* PAYLOAD - bytes from this point on in the buffer are from the
+ * RGX_PHWPERF_V2_PACKET_DATA union which encodes the payload data specific
+ * to the event type set in the header. When the structure in the union
+ * has a variable length member e.g. HW packets, the payload length
+ * varies and ui32Size in the header defines the end of the payload data.
+ */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp);
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR);
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK 0xFFFFU
+
+/*! This macro defines the upper limit within which the size of the largest
+ * variable-length HWPerf packet must fall, currently 3KB. This constant may be
+ * used to allocate a buffer to hold one packet.
+ * This upper limit is policed by packet producing code.
+ */
+#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U
+
+/*! Defines an upper limit to the size of a variable length packet payload.
+ */
+#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\
+ sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+
+/*! Macro which takes a structure name and provides the packet size for
+ * a fixed size payload packet, rounded up to 8 bytes to align packets
+ * for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro which takes the number of bytes written in the data payload of a
+ * packet for a variable size payload packet, rounded up to 8 bytes to
+ * align packets for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
+
+
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK 0x7FFFFU
+#define RGX_HWPERF_TYPEID_EVENT_MASK 0x07FFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK 0x08000U
+#define RGX_HWPERF_TYPEID_STREAM_MASK 0x70000U
+#define RGX_HWPERF_TYPEID_OSID_MASK 0xFF000000U
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT 15U
+#define RGX_HWPERF_META_THREAD_ID0 0x0U
+#define RGX_HWPERF_META_THREAD_ID1 0x1U
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK 0x1U
+/*! Stream ID macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_STREAM_SHIFT 16U
+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
+#define RGX_HWPERF_OSID_SHIFT 24U
+typedef enum {
+ RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */
+ RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */
+ RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
+ RGX_HWPERF_STREAM_ID_LAST,
+} RGX_HWPERF_STREAM_ID;
+
+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
+static_assert((RGX_HWPERF_STREAM_ID_LAST - 1) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
+ "To many HWPerf stream IDs.");
+
+/*! Macros used to set the packet type and encode meta thread ID (0|1), hwperf stream ID, and OSID within */
+#define RGX_HWPERF_MAKE_TYPEID(_stream,_type,_thread,_osid)\
+ ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((_stream)<<RGX_HWPERF_STREAM_SHIFT)) | \
+ (RGX_HWPERF_TYPEID_THREAD_MASK&((_thread)<<RGX_HWPERF_META_THREAD_SHIFT)) | \
+ (RGX_HWPERF_TYPEID_EVENT_MASK&(_type)) | \
+ (RGX_HWPERF_TYPEID_OSID_MASK & ((_osid) << RGX_HWPERF_OSID_SHIFT))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Obtains the guest OSID which resulted in packet generation */
+#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT)
+
+/*! Obtain stream id */
+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT))
+
+/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR*) (_buffer_addr))
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) ((IMG_BYTE*) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR*) ( ((IMG_BYTE*)(_packet_addr))+(RGX_HWPERF_SIZE_MASK&(_packet_addr)->ui32Size)) )
+
+/*! Obtains a typed pointer to a packet header given the packed data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR*) ( ((IMG_BYTE*)(_packet_addr)) - sizeof(RGX_HWPERF_V2_PACKET_HDR) ))
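+
+/* Minimal traversal sketch (illustrative, not part of the interface; the
+ * function name is arbitrary, the INLINE qualifier is assumed to be available
+ * via the includes above, and full packet-size bounds checking is omitted for
+ * brevity). It walks a buffer of V2 packets with the accessor macros defined
+ * above and counts them.
+ */
+static INLINE IMG_UINT32 RGXHWPerfExampleCountPackets(void *pvBuffer,
+                                                      IMG_UINT32 ui32Len)
+{
+	RGX_HWPERF_V2_PACKET_HDR *psPkt = RGX_HWPERF_GET_PACKET(pvBuffer);
+	IMG_BYTE *pbEnd = ((IMG_BYTE*)pvBuffer) + ui32Len;
+	IMG_UINT32 ui32Count = 0;
+
+	while ((((IMG_BYTE*)psPkt) + sizeof(*psPkt)) <= pbEnd &&
+	       HWPERF_PACKET_ISVALID(psPkt->ui32Sig) &&
+	       RGX_HWPERF_GET_SIZE(psPkt) != 0)
+	{
+		ui32Count++;
+		/* e.g. dispatch here on RGX_HWPERF_GET_TYPE(psPkt) and
+		 * RGX_HWPERF_GET_STREAM_ID(psPkt) */
+		psPkt = RGX_HWPERF_GET_NEXT_PACKET(psPkt);
+	}
+	return ui32Count;
+}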
+
+/* This macro is not a real array size, but indicates the array has a
+ * variable length only known at run-time but always contains at least 1 element.
+ * The final size of the array is deduced from the size field of a packet
+ * header. */
+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U
+
+/* This macro is not a real array size, but indicates the array is optional
+ * and if present has a variable length only known at run-time. The final
+ * size of the array is deduced from the size field of a packet header. */
+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks,_blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) ((_blkinfo & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
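+
+/* Round-trip example (the values are illustrative):
+ *   ui32BlkInfo = RGX_HWPERF_MAKE_BLKINFO(4, 96)   ( == 0x00040060 )
+ *   RGX_HWPERF_GET_BLKCOUNT(ui32BlkInfo)  == 4
+ *   RGX_HWPERF_GET_BLKOFFSET(ui32BlkInfo) == 96
+ */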
+
+/* This is the maximum frame contexts that are supported in the driver at the moment */
+#define RGX_HWPERF_HW_MAX_WORK_CONTEXT 2
+
+/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */
+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U
+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the UFO count and data stream fields */
+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U
+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U
+
+/*! Macro used to set ufo stream info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize,_soff)\
+ ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) |\
+ (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT))))
+
+/*! Macro used to obtain the UFO count */
+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo)\
+ ((_streaminfo & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT)
+
+/*! Obtains the offset of the ufo stream in the packet */
+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo)\
+ ((_streaminfo & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)
+
+/******************************************************************************
+ * API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with a performance
+ * counting module. Directly addressable blocks must have a value between 0..15.
+ * First hex digit represents a group number and the second hex digit represents
+ * the unit within the group. Group 0 is the direct group, all others are
+ * indirect groups.
+ */
+typedef enum
+{
+ /* Directly addressable counter blocks */
+ RGX_CNTBLK_ID_TA = 0x0000,
+ RGX_CNTBLK_ID_RASTER = 0x0001, /* Non-cluster grouping cores */
+ RGX_CNTBLK_ID_HUB = 0x0002, /* Non-cluster grouping cores */
+ RGX_CNTBLK_ID_TORNADO = 0x0003, /* XT cores */
+ RGX_CNTBLK_ID_JONES = 0x0004, /* S7 cores */
+ RGX_CNTBLK_ID_BF = 0x0005, /* Doppler unit */
+ RGX_CNTBLK_ID_BT = 0x0006, /* Doppler unit */
+ RGX_CNTBLK_ID_RT = 0x0007, /* Doppler unit */
+ RGX_CNTBLK_ID_SH = 0x0008, /* Ray tracing unit */
+
+ RGX_CNTBLK_ID_DIRECT_LAST,
+
+ /* Indirectly addressable counter blocks */
+ RGX_CNTBLK_ID_TPU_MCU0 = 0x0010, /* Addressable by Dust */
+ RGX_CNTBLK_ID_TPU_MCU1 = 0x0011,
+ RGX_CNTBLK_ID_TPU_MCU2 = 0x0012,
+ RGX_CNTBLK_ID_TPU_MCU3 = 0x0013,
+ RGX_CNTBLK_ID_TPU_MCU4 = 0x0014,
+ RGX_CNTBLK_ID_TPU_MCU5 = 0x0015,
+ RGX_CNTBLK_ID_TPU_MCU6 = 0x0016,
+ RGX_CNTBLK_ID_TPU_MCU7 = 0x0017,
+ RGX_CNTBLK_ID_TPU_MCU_ALL = 0x4010,
+
+ RGX_CNTBLK_ID_USC0 = 0x0020, /* Addressable by Cluster */
+ RGX_CNTBLK_ID_USC1 = 0x0021,
+ RGX_CNTBLK_ID_USC2 = 0x0022,
+ RGX_CNTBLK_ID_USC3 = 0x0023,
+ RGX_CNTBLK_ID_USC4 = 0x0024,
+ RGX_CNTBLK_ID_USC5 = 0x0025,
+ RGX_CNTBLK_ID_USC6 = 0x0026,
+ RGX_CNTBLK_ID_USC7 = 0x0027,
+ RGX_CNTBLK_ID_USC8 = 0x0028,
+ RGX_CNTBLK_ID_USC9 = 0x0029,
+ RGX_CNTBLK_ID_USC10 = 0x002A,
+ RGX_CNTBLK_ID_USC11 = 0x002B,
+ RGX_CNTBLK_ID_USC12 = 0x002C,
+ RGX_CNTBLK_ID_USC13 = 0x002D,
+ RGX_CNTBLK_ID_USC14 = 0x002E,
+ RGX_CNTBLK_ID_USC15 = 0x002F,
+ RGX_CNTBLK_ID_USC_ALL = 0x4020,
+
+ RGX_CNTBLK_ID_TEXAS0 = 0x0030, /* Addressable by Phantom in XT, Dust in S7 */
+ RGX_CNTBLK_ID_TEXAS1 = 0x0031,
+ RGX_CNTBLK_ID_TEXAS2 = 0x0032,
+ RGX_CNTBLK_ID_TEXAS3 = 0x0033,
+ RGX_CNTBLK_ID_TEXAS4 = 0x0034,
+ RGX_CNTBLK_ID_TEXAS5 = 0x0035,
+ RGX_CNTBLK_ID_TEXAS6 = 0x0036,
+ RGX_CNTBLK_ID_TEXAS7 = 0x0037,
+ RGX_CNTBLK_ID_TEXAS_ALL = 0x4030,
+
+ RGX_CNTBLK_ID_RASTER0 = 0x0040, /* Addressable by Phantom, XT only */
+ RGX_CNTBLK_ID_RASTER1 = 0x0041,
+ RGX_CNTBLK_ID_RASTER2 = 0x0042,
+ RGX_CNTBLK_ID_RASTER3 = 0x0043,
+ RGX_CNTBLK_ID_RASTER_ALL = 0x4040,
+
+ RGX_CNTBLK_ID_BLACKPEARL0 = 0x0050, /* Addressable by Phantom, S7 only */
+ RGX_CNTBLK_ID_BLACKPEARL1 = 0x0051,
+ RGX_CNTBLK_ID_BLACKPEARL2 = 0x0052,
+ RGX_CNTBLK_ID_BLACKPEARL3 = 0x0053,
+ RGX_CNTBLK_ID_BLACKPEARL_ALL= 0x4050,
+
+ RGX_CNTBLK_ID_PBE0 = 0x0060, /* Addressable by Cluster, S7 only */
+ RGX_CNTBLK_ID_PBE1 = 0x0061,
+ RGX_CNTBLK_ID_PBE2 = 0x0062,
+ RGX_CNTBLK_ID_PBE3 = 0x0063,
+ RGX_CNTBLK_ID_PBE4 = 0x0064,
+ RGX_CNTBLK_ID_PBE5 = 0x0065,
+ RGX_CNTBLK_ID_PBE6 = 0x0066,
+ RGX_CNTBLK_ID_PBE7 = 0x0067,
+ RGX_CNTBLK_ID_PBE8 = 0x0068,
+ RGX_CNTBLK_ID_PBE9 = 0x0069,
+ RGX_CNTBLK_ID_PBE10 = 0x006A,
+ RGX_CNTBLK_ID_PBE11 = 0x006B,
+ RGX_CNTBLK_ID_PBE12 = 0x006C,
+ RGX_CNTBLK_ID_PBE13 = 0x006D,
+ RGX_CNTBLK_ID_PBE14 = 0x006E,
+ RGX_CNTBLK_ID_PBE15 = 0x006F,
+ RGX_CNTBLK_ID_PBE_ALL = 0x4060,
+
+ RGX_CNTBLK_ID_BX_TU0 = 0x0070, /* Doppler unit, XT only */
+ RGX_CNTBLK_ID_BX_TU1 = 0x0071,
+ RGX_CNTBLK_ID_BX_TU2 = 0x0072,
+ RGX_CNTBLK_ID_BX_TU3 = 0x0073,
+ RGX_CNTBLK_ID_BX_TU_ALL = 0x4070,
+
+ RGX_CNTBLK_ID_LAST = 0x0074,
+
+ RGX_CNTBLK_ID_CUSTOM0 = 0x7FF0,
+ RGX_CNTBLK_ID_CUSTOM1 = 0x7FF1,
+ RGX_CNTBLK_ID_CUSTOM2 = 0x7FF2,
+ RGX_CNTBLK_ID_CUSTOM3 = 0x7FF3,
+ RGX_CNTBLK_ID_CUSTOM4_FW = 0x7FF4 /* Custom block used for getting statistics held in the FW */
+
+} RGX_HWPERF_CNTBLK_ID;
+
+/* Masks for the counter block ID*/
+#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U)
+#define RGX_CNTBLK_ID_GROUP_SHIFT (4)
+#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U)
+#define RGX_CNTBLK_ID_UNIT_MASK (0xf)
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((RGX_CNTBLK_ID_ ## _class ## _n) - (RGX_CNTBLK_ID_ ## _class ## 0) +1)
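+
+/* For example RGX_CNTBLK_INDIRECT_COUNT(USC, 15) expands to
+ * (RGX_CNTBLK_ID_USC15 - RGX_CNTBLK_ID_USC0 + 1) = (0x002F - 0x0020 + 1) = 16,
+ * i.e. the number of USC instances covered by the ID range above.
+ */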
+
+/*! The number of layout blocks defined with configurable multiplexed
+ * performance counters, hence excludes custom counter blocks.
+ */
+#define RGX_HWPERF_MAX_DEFINED_BLKS (\
+ RGX_CNTBLK_ID_DIRECT_LAST +\
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\
+ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15)+\
+ RGX_CNTBLK_INDIRECT_COUNT(BX_TU, 3) )
+
+#define RGX_HWPERF_EVENT_MASK_VALUE(e) (((IMG_UINT64)1)<<(e))
+
+#define RGX_CUSTOM_FW_CNTRS \
+ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
+ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))
+
+/*! Counter IDs for the firmware held stats */
+typedef enum
+{
+#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id,
+ RGX_CUSTOM_FW_CNTRS
+#undef X
+
+ /* always the last entry in the list */
+ RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
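+
+/* The same X-macro can be expanded into other tables. A sketch (not used by
+ * the driver itself; the array name is arbitrary) building a per-counter
+ * allowed-event mask array, indexable by the RGX_HWPERF_CUSTOM_FW_CNTR_ID
+ * values:
+ *
+ *   static const IMG_UINT64 gaui64CustomCntrAllowMask[] = {
+ *   #define X(ctr, id, allow_mask) allow_mask,
+ *       RGX_CUSTOM_FW_CNTRS
+ *   #undef X
+ *   };
+ */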
+
+/*! Identifier for each counter in a performance counting module */
+typedef enum
+{
+ RGX_CNTBLK_COUNTER0_ID = 0,
+ RGX_CNTBLK_COUNTER1_ID = 1,
+ RGX_CNTBLK_COUNTER2_ID = 2,
+ RGX_CNTBLK_COUNTER3_ID = 3,
+ RGX_CNTBLK_COUNTER4_ID = 4,
+ RGX_CNTBLK_COUNTER5_ID = 5,
+ /* MAX value used in server handling of counter config arrays */
+ RGX_CNTBLK_COUNTERS_MAX
+} RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */
+#define _MASK_RANGE(_b1, _b2) (((IMG_UINT64_C(1) << ((_b2)-(_b1)+1)) - 1) << _b1)
+#define MASK_RANGE(R) _MASK_RANGE(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) ((IMG_UINT32)(1<<(e)))
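+
+/* For example MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE) expands to
+ * _MASK_RANGE(0x01, 0x06) = ((1 << 6) - 1) << 1 = 0x7E, i.e. one bit set
+ * for each FW event type in that range.
+ */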
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * Next macro covers all FW Start/End/Debug (SED) events.
+ */
+#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\
+ RGX_HWPERF_EVENT_MASK_FW_UFO |\
+ RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+ RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
+
+
+
+/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters()
+ * It is used to configure the performance counter module in a layout
+ * block and allows one or more counters in the block to be
+ * configured in one operation based on the counter select mask. The bit
+ * shifts for this are the values in RGX_HWPERF_CNTBLK_COUNTER_ID. This mask
+ * also encodes which values in the arrays are valid, for example, if bit 1 is
+ * set then bit 1 of ui8Mode, aui8GroupSelect[1], aui16BitSelect[1],
+ * aui32BatchMax[1], and aui32BatchMin[1] must be set. If these array elements
+ * are all set to 0 then the counter will not count and will not be in the HW
+ * event, effectively disabling the counter from the caller's point of view.
+ * If any are non-zero then the counter will be included in the HW event.
+ *
+ * Each layout block has 4 or 6 counters that can be programmed independently to
+ * profile the performance of a HW block. Each counter can be configured to
+ * accumulate statistics from 1 of 32 counter groups defined for that block.
+ * Each counter group can have up to 16 signals/bits defined that can be
+ * selected. Each counter may accumulate in one of two modes.
+ * See hwdefs/regapiperf.h for block/group/signal definitions.
+ */
+typedef struct _RGX_HWPERF_CONFIG_CNTBLK_
+{
+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+ IMG_UINT16 ui16BlockID;
+
+ /*! 4 or 6 LSBs are a mask of which counters to configure. Bit 0 is counter 0,
+ * bit 1 is counter 1 and so on. */
+ IMG_UINT8 ui8CounterSelect;
+
+ /*! 4 or 6 LSBs 0 for counting 1's in the group, 1 for treating the group
+ * signals as a number for unsigned addition. Bit 0 is counter 0, bit 1 is
+ * counter 1 and so on. This member relates to the MODE field
+ * in the RGX_CR_<N>_PERF_SELECTm register for each counter */
+ IMG_UINT8 ui8Mode;
+
+ /*! 5 or 6 LSBs used as the GROUP_SELECT field in the RGX_CR_<N>_PERF_SELECTm
+ * register. Array index 0 is counter 0, index 1 is counter 1 and so on. */
+ IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 16 LSBs used as the BIT_SELECT field in the RGX_CR_<N>_PERF_SELECTm
+ * register. Array indexes relate to counters as above. */
+ IMG_UINT16 aui16BitSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 14 LSBs used as the BATCH_MAX field in the RGX_CR_<N>_PERF_SELECTm
+ * register. Array indexes relate to counters as above. */
+ IMG_UINT32 aui32BatchMax[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 14 LSBs used as the BATCH_MIN field in the RGX_CR_<N>_PERF_SELECTm
+ * register. Array indexes relate to counters as above. */
+ IMG_UINT32 aui32BatchMin[RGX_CNTBLK_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
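+
+/* Configuration sketch (illustrative only; the helper name and the chosen
+ * group/bit values are assumptions, not a recommended setup). It programs
+ * counter 0 of the TA block to count, in "count 1s" mode, the signal on
+ * bit 2 of group 1, with batching disabled. The caller is assumed to have
+ * zero-initialised the structure beforehand.
+ */
+static INLINE void RGXHWPerfExampleConfigTACounter0(RGX_HWPERF_CONFIG_CNTBLK *psCfg)
+{
+	psCfg->ui16BlockID      = RGX_CNTBLK_ID_TA;
+	psCfg->ui8CounterSelect = 1U << RGX_CNTBLK_COUNTER0_ID;   /* counter 0 only */
+	psCfg->ui8Mode          = 0;                              /* count 1s mode */
+	psCfg->aui8GroupSelect[RGX_CNTBLK_COUNTER0_ID] = 1;       /* counter group 1 */
+	psCfg->aui16BitSelect[RGX_CNTBLK_COUNTER0_ID]  = 1U << 2; /* signal bit 2 */
+	psCfg->aui32BatchMax[RGX_CNTBLK_COUNTER0_ID]   = 0;
+	psCfg->aui32BatchMin[RGX_CNTBLK_COUNTER0_ID]   = 0;
+}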
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/* All the Data Masters HWPerf is aware of. When a new DM is added to this list,
+ * it should be appended at the end to maintain backward compatibility of HWPerf data */
+typedef enum _RGX_HWPERF_DM {
+
+ RGX_HWPERF_DM_GP,
+ RGX_HWPERF_DM_2D,
+ RGX_HWPERF_DM_TA,
+ RGX_HWPERF_DM_3D,
+ RGX_HWPERF_DM_CDM,
+ RGX_HWPERF_DM_RTU,
+ RGX_HWPERF_DM_SHG,
+ RGX_HWPERF_DM_TDM,
+
+ RGX_HWPERF_DM_LAST,
+
+ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities
+ * at 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+
+/*! This structure holds the field data of a Hardware packet.
+ */
+#define RGX_HWPERF_HW_DATA_FIELDS_LIST \
+IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */\
+IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */\
+IMG_UINT32 ui32PID; /*!< Process identifier */\
+IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */\
+IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */\
+IMG_UINT32 ui32ExtJobRef; /*!< Externally provided job reference used to track work for debugging purposes */\
+IMG_UINT32 ui32IntJobRef; /*!< Internally provided job reference used to track work for debugging purposes */\
+IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */\
+IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counterblock stream offset */\
+IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */\
+IMG_UINT32 ui32CtxPriority; /*!< Context priority */\
+IMG_UINT32 ui32Padding1; /* To ensure correct alignment */
+
+typedef struct
+{
+ RGX_HWPERF_HW_DATA_FIELDS_LIST
+} RGX_HWPERF_HW_DATA_FIELDS;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_FIELDS);
+
+typedef enum {
+ RGX_HWPERF_UFO_EV_UPDATE,
+ RGX_HWPERF_UFO_EV_CHECK_SUCCESS,
+ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS,
+ RGX_HWPERF_UFO_EV_CHECK_FAIL,
+ RGX_HWPERF_UFO_EV_PRCHECK_FAIL,
+
+ RGX_HWPERF_UFO_EV_LAST
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32Value;
+ } sCheckSuccess;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Required;
+ } sCheckFail;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32OldValue;
+ IMG_UINT32 ui32NewValue;
+ } sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds data for ufo packet. */
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType;
+ IMG_UINT32 ui32TimeCorrIndex;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32StreamInfo;
+ RGX_HWPERF_DM eDM;
+ IMG_UINT32 ui32Padding;
+
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ /*!< Data stream contains UFO objects for specific command.
+ *
+ * Data stream consists of tuples containing UFO related data. Depending
+ * on the UFO event type there are three tuple formats:
+ * For UPDATE packets the tuple consists of three 32bit values:
+ * <32bit> : UFO's firmware address
+ * <32bit> : UFO's old value
+ * <32bit> : update value
+ * For PRCHECK/CHECK SUCCESS packets the tuple consists of two 32bit
+ * values:
+ * <32bit> : UFO's firmware address
+ * <32bit> : UFO's/fence value
+ * For PRCHECK/CHECK FAIL packets the tuple consists of three 32bit values:
+ * <32bit> : UFO's firmware address
+ * <32bit> : UFO's value
+ * <32bit> : fence value
+ *
+ * An example of data stream:
+ * <UFO0addr>, <UFO0val>, <Fnc0Val>, <UFO1addr>, <UFO1val>, <Fnc1Val> ...
+ *
+ * The declared array size is only large enough to hold one tuple. The
+ * real size is dynamic and reflects the number of tuples actually
+ * written into the array.
+ */
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
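+
+/* Decoding sketch (illustrative, not part of the interface; the helper name
+ * is arbitrary): returns the width of one stream tuple in 32-bit words for a
+ * given UFO event type, matching the data stream description above.
+ */
+static INLINE IMG_UINT32 RGXHWPerfExampleUFOTupleWords(RGX_HWPERF_UFO_EV eEvType)
+{
+	switch (eEvType)
+	{
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			return 3; /* FW address, old value, update value */
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			return 2; /* FW address, UFO/fence value */
+		default:
+			return 3; /* CHECK/PRCHECK FAIL: FW address, value, required value */
+	}
+}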
+
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */
+ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */
+ IMG_UINT32 ui32TimeCorrIndex;
+ IMG_UINT32 ui32Padding;
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+typedef enum
+{
+ RGX_HWPERF_CTRL_TYPE_INVALID,
+ RGX_HWPERF_CTRL_TYPE_CLIENT_STREAM_OPEN,
+ RGX_HWPERF_CTRL_TYPE_CLIENT_STREAM_CLOSE,
+ RGX_HWPERF_CTRL_TYPE_LAST
+} RGX_HWPERF_HOST_CTRL_TYPE;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_CTRL_TYPE eEvType;
+ union
+ {
+ IMG_UINT32 ui32Pid;
+ } uData;
+} RGX_HWPERF_HOST_CTRL_DATA;
+
+typedef enum
+{
+ RGX_HWPERF_KICK_TYPE_TA3D,
+ RGX_HWPERF_KICK_TYPE_TQ2D,
+ RGX_HWPERF_KICK_TYPE_TQ3D,
+ RGX_HWPERF_KICK_TYPE_CDM,
+ RGX_HWPERF_KICK_TYPE_RS,
+ RGX_HWPERF_KICK_TYPE_VRDM,
+ RGX_HWPERF_KICK_TYPE_TQTDM,
+ RGX_HWPERF_KICK_TYPE_SYNC,
+ RGX_HWPERF_KICK_TYPE_LAST
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+ RGX_HWPERF_KICK_TYPE ui32EnqType;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType;
+ IMG_UINT32 ui32StreamInfo;
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID,
+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC,
+ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_CHAR acName[SYNC_MAX_CLASS_NAME_LEN];
+ } sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail;
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ } sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ IMG_UINT64 ui64CRTimestamp;
+ IMG_UINT64 ui64OSTimestamp;
+ IMG_UINT32 ui32ClockSpeed;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_KM_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance counter table
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX HW Performance counters table
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_fwif_hwperf.h"
+#include "rgxdefs_km.h"
+#include "rgx_hwperf_table.h"
+
+/* Includes needed for PVRSRVKM (Server) context */
+#if defined(SUPPORT_KERNEL_SRVINIT)
+# include "rgx_bvnc_defs_km.h"
+# if defined(__KERNEL__)
+# include "rgxdevice.h"
+# endif
+#endif
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+# include "rgxfw_utils.h"
+/* firmware context */
+# define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+# include "pvr_debug.h"
+/* host client/server context */
+# define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in gasCntBlkTypeModel[] table below and only called from
+ RGX_FIRMWARE run-time context. Therefore compile time configuration is used.
+ *****************************************************************************/
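+/* Note: the snippet below is an illustrative sketch only and is not code from
+ * this patch. Given a table entry psEntry of type
+ * RGXFW_HWPERF_CNTBLK_TYPE_MODEL, the firmware could skip counter reads for a
+ * powered-off block instance with something like:
+ *
+ *     if (!psEntry->pfnIsBlkPowered((RGX_HWPERF_CNTBLK_ID)psEntry->uiCntBlkIdBase, ui8UnitId))
+ *     {
+ *         continue;
+ *     }
+ */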
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+# include "rgxfw_pow.h"
+# include "rgxfw_utils.h"
+
+static IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ /* S7XT: JONES */
+ return (eBlkType == RGX_CNTBLK_ID_JONES) ? IMG_TRUE : IMG_FALSE;
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+ /* S6XT: TA, TORNADO */
+ return IMG_TRUE;
+#else
+ /* S6 : TA, HUB, RASTER (RASCAL) */
+ return (gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) ? IMG_TRUE : IMG_FALSE;
+#endif
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+static IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_dusts_num();
+
+ if ((gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) &&
+ (ui32NumDustsEnabled > 0))
+ {
+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER)
+ IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2;
+
+ switch (eBlkType)
+ {
+ case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */
+#if defined (RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ case RGX_CNTBLK_ID_TEXAS0: /* S7 */
+#endif
+ if (ui8UnitId >= ui32NumDustsEnabled)
+ {
+ return IMG_FALSE;
+ }
+ break;
+ case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */
+ case RGX_CNTBLK_ID_PBE0: /* S7 */
+ /* Handle single cluster cores */
+ if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled))
+ {
+ return IMG_FALSE;
+ }
+ break;
+ case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */
+ case RGX_CNTBLK_ID_RASTER0: /* S6XT */
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+ case RGX_CNTBLK_ID_TEXAS0: /* S6XT */
+#endif
+ if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled)))
+ {
+ return IMG_FALSE;
+ }
+ break;
+ default:
+ RGXFW_ASSERT(IMG_FALSE); /* should never get here, table error */
+ break;
+ }
+#else
+ /* Always true, no fused DUSTs, all powered so do not check unit */
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+#endif
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+ return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct ((void*)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL)
+# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_RAY_TRACING)
+
+/* There is currently no power island control in the firmware for ray tracing,
+ * so these blocks are assumed to be always powered. */
+static IMG_BOOL rgxfw_hwperf_pow_st_gandalf(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+ return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+Therefore each function has two implementations, one for compile-time and one
+for run-time configuration, depending on the context. The functions inform the
+caller whether the block is valid for the particular RGX device. Other
+ run-time dependent data is returned in psRtInfo for the caller to use.
+ *****************************************************************************/
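+/* Note: the snippet below is an illustrative sketch only and is not code from
+ * this patch. In the Server context the opaque pvDev_km argument is the
+ * PVRSRV_RGXDEV_INFO pointer, so a hypothetical caller might do:
+ *
+ *     RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
+ *     if (psEntry->pfnIsBlkPresent(psEntry, psDevInfo, &sRtInfo))
+ *     {
+ *         (sRtInfo.uiNumUnits and sRtInfo.uiBitSelectPreserveMask are now valid)
+ *     }
+ */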
+
+/* Used for block types: USC */
+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_USC0);
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = psDevInfo->sDevFeatureCfg.ui32NumClusters;
+ return IMG_TRUE;
+ }
+ }
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: Direct RASTERISATION, HUB */
+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_HUB));
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) &&
+ ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0))
+ {
+ psRtInfo->uiNumUnits = 1;
+ if (((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_44885_BIT_MASK) != 0) &&
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER))
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x7C00;
+ }
+ else
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ }
+ return IMG_TRUE;
+ }
+ }
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = 1;
+# if defined(HW_ERN_44885)
+ if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x7C00;
+ }
+ else
+# endif
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ }
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: BF, BT, RT, SH, BX_TU */
+static IMG_BOOL rgx_hwperf_blk_present_raytracing(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BF) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BT) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RT) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_SH) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BX_TU0));
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) != 0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+ }
+ }
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_RAY_TRACING)
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ DBG_ASSERT(psBlkTypeDesc->uiPerfReg != 0); /* Check for broken config */
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+static INLINE IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */
+ return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 1
+ : (psFeatCfg->ui32NumClusters+3)/4;
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
+ return MAX((psFeatCfg->ui32NumClusters>>1),1);
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
+ return psFeatCfg->ui32NumClusters;
+}
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) */
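+/* Worked example (illustrative, not part of this patch): for a device
+ * reporting ui32NumClusters == 6 with cluster grouping present,
+ * rgx_units_indirect_by_phantom() returns (6+3)/4 = 2,
+ * rgx_units_phantom_indirect_by_dust() returns MAX(6>>1, 1) = 3, and
+ * rgx_units_phantom_indirect_by_cluster() returns 6.
+ */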
+
+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK) != 0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits =
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ? 1
+ : rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); // Texas, Ind. Raster
+ return IMG_TRUE;
+ }
+ }
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) != 0)
+ {
+ if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+ {
+ psRtInfo->uiBitSelectPreserveMask =
+ ((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_41805_BIT_MASK) != 0)
+ ? 0x8000 : 0x0000;
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ psRtInfo->uiNumUnits = 1;
+ return IMG_TRUE;
+ }
+ }
+ }
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+# if defined(HW_ERN_41805)
+ if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x8000;
+ }
+ else
+# endif
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ }
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: TA, TPU_MCU */
+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0));
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) == 0) &&
+ ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0))
+ {
+ if (((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_41805_BIT_MASK) != 0) &&
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0))
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x8000;
+ }
+ else
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ }
+ psRtInfo->uiNumUnits = (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ? 1
+ : rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); // TPU_MCU0
+ return IMG_TRUE;
+ }
+ }
+#else /* FW or Client context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+# if defined(HW_ERN_41805)
+ if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x8000;
+ }
+ else
+# endif
+ {
+ psRtInfo->uiBitSelectPreserveMask = 0x0000;
+ }
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+
+ /* Some functions are not used on some BVNCs; silence compiler warnings */
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_raytracing);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top);
+
+ return IMG_FALSE;
+}
+
+/* Used to instantiate a null row in the block type model table below where the
+ * block is not supported for a given build BVNC in firmware/user mode context.
+ * This is needed as the block ID to block type lookup uses the table as well
+ * and clients may try to access blocks not in the hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false}
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where the block is not present on the RGX device in question,
+ pfnIsBlkPresent() returns false; if the block is valid and present, it
+ returns true.
+ Columns in the table marked with ** hold default values; the value
+ returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent()
+ should be used at run time by the caller. These defaults are only valid
+ for compile-time BVNC-configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+*****************************************************************************/
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+/* uiCntBlkIdBase, uiIndirectReg, uiPerfReg, uiSelect0BaseReg, uiCounter0BaseReg,
+ * uiNumCounters, uiNumUnits**, uiSelectRegModeShift, uiSelectRegOffsetShift,
+ * pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */
+ /*RGX_CNTBLK_ID_TA*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA),
+#endif
+
+ /*RGX_CNTBLK_ID_RASTER*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER),
+#endif
+
+ /*RGX_CNTBLK_ID_HUB*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB),
+#endif
+
+ /*RGX_CNTBLK_ID_TORNADO*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO),
+#endif
+
+ /*RGX_CNTBLK_ID_JONES*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES),
+#endif
+
+ /*RGX_CNTBLK_ID_BF RGX_CNTBLK_ID_BT RGX_CNTBLK_ID_RT RGX_CNTBLK_ID_SH*/
+ /* Conditional for the rgxsrvinit.c UM build, where the CR definitions are not unconditionally available in every context and multi-BVNC support is not operational */
+#if defined(RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BF, 0, /* direct */ DPX_CR_BF_PERF, DPX_CR_BF_PERF_SELECT0, DPX_CR_BF_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BF_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_BT, 0, /* direct */ DPX_CR_BT_PERF, DPX_CR_BT_PERF_SELECT0, DPX_CR_BT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_RT, 0, /* direct */ DPX_CR_RT_PERF, DPX_CR_RT_PERF_SELECT0, DPX_CR_RT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_SH, 0, /* direct */ RGX_CR_SH_PERF, RGX_CR_SH_PERF_SELECT0, RGX_CR_SH_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_SH_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BF),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BT),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RT),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_SH),
+#endif
+
+ /*RGX_CNTBLK_ID_TPU_MCU0*/
+#if defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_MCU_L0_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_not_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+
+ /*RGX_CNTBLK_ID_USC0*/
+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0),
+#endif
+
+ /*RGX_CNTBLK_ID_TEXAS0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS3_PERF_INDIRECT, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_PERF_INDIRECT, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+
+ /*RGX_CNTBLK_ID_RASTER0*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0),
+#endif
+
+ /*RGX_CNTBLK_ID_BLACKPEARL0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0),
+#endif
+
+ /*RGX_CNTBLK_ID_PBE0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+
+ /*RGX_CNTBLK_ID_BX_TU0*/
+ /* Conditional for the rgxsrvinit.c UM build, where the CR definitions are not unconditionally available in every context and multi-BVNC support is not operational */
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BX_TU0, RGX_CR_BX_TU_PERF_INDIRECT, DPX_CR_BX_TU_PERF, DPX_CR_BX_TU_PERF_SELECT0, DPX_CR_BX_TU_PERF_COUNTER_0, 4, RGX_HWPERF_DOPPLER_BX_TU_BLKS, 21, 3, "RGX_CR_BX_TU_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BX_TU0),
+#endif
+ };
+
+
+IMG_INTERNAL IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+ *ppsModel = gasCntBlkTypeModel;
+ return IMG_ARR_NUM_ELEMS(gasCntBlkTypeModel);
+}
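+
+/* Note: the snippet below is an illustrative sketch only and is not code from
+ * this patch. A caller can enumerate the block type model table as follows:
+ *
+ *     const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psModel;
+ *     IMG_UINT32 i, ui32Count = RGXGetHWPerfBlockConfig(&psModel);
+ *
+ *     for (i = 0; i < ui32Count; i++)
+ *     {
+ *         (process psModel[i], one block type descriptor per table entry)
+ *     }
+ */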
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title HWPerf counter table header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally for HWPerf data retrieval
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_HWPERF_TABLE_H__)
+#define __RGX_HWPERF_TABLE_H__
+
+#include "img_types.h"
+#include "rgx_fwif_hwperf.h"
+
+
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions to check dynamic power state of
+ * counter block instance. Used only in firmware. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+ RGX_HWPERF_CNTBLK_ID eBlkType,
+ IMG_UINT8 ui8UnitId);
+
+/* Counter block run-time info */
+typedef struct _RGX_HWPERF_CNTBLK_RT_INFO_
+{
+ IMG_UINT32 uiBitSelectPreserveMask; /* Select register bits to preserve on programming, HW_ERN_41805 */
+ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+
+/* Function pointer type for functions that check whether a block is valid and
+ * present on the RGX device at run time. It may use compile-time or run-time
+ * logic depending on where the code executes: server, srvinit or firmware.
+ * Values in the psRtInfo output parameter are only valid if true is returned. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+ const struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+ void *pvDev_km,
+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo);
+
+/* This structure encodes properties of a type of performance counter block.
+ * The structure is sometimes referred to as a block type descriptor. The
+ * properties contained in this structure represent the columns in the
+ * block type model table variable below. Their values vary depending on
+ * the build BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor. */
+struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+ /* Could use RGXFW_ALIGN_DCACHEL here, but then we would waste 40% of the cache line. */
+ IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */
+ IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */
+ IMG_UINT32 uiPerfReg; /* RGX_CR_*_PERF register for this block type */
+ IMG_UINT32 uiSelect0BaseReg; /* RGX_CR_*_PERF_SELECT0 register for this block type */
+ IMG_UINT32 uiCounter0BaseReg; /* RGX_CR_*_PERF_COUNTER_0 register for this block type */
+ IMG_UINT8 uiNumCounters; /* Number of counters in this block type */
+ IMG_UINT8 uiNumUnits; /* Number of instances of this block type in the core */
+ IMG_UINT8 uiSelectRegModeShift; /* Mode field shift value of select registers */
+ IMG_UINT8 uiSelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */
+ IMG_CHAR pszBlockNameComment[30]; /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+};
+
+/*****************************************************************************/
+
+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel);
+
+
+#endif /* __RGX_HWPERF_TABLE_H__ */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title RGX memory allocation flags
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGX_MEMALLOCFLAGS_H_
+#define _RGX_MEMALLOCFLAGS_H_
+
+#define PMMETA_PROTECT (1 << 0) /* Memory that only the PM and Meta can access */
+#define FIRMWARE_CACHED (1 << 1) /* Memory that is cached in META/MIPS */
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX META definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX META helper definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_META_H__)
+#define __RGX_META_H__
+
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+
+#include "img_defs.h"
+#include "km/rgxdefs_km.h"
+
+
+/************************************************************************
+* META registers and MACROS
+************************************************************************/
+#define META_CR_CTRLREG_BASE(T) (0x04800000 + 0x1000*(T))
+
+#define META_CR_TXPRIVEXT (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN (0x1<<7)
+
+#define META_CR_SYSC_JTAG_THREAD (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
+
+#define META_CR_PERF_COUNT0 (0x0480FFE0)
+#define META_CR_PERF_COUNT1 (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (0x8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (0x9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT (24)
+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT (0x04820500)
+#define META_CR_PERF_ICORE0 (0x0480FFD0)
+#define META_CR_PERF_ICORE1 (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
+
+#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \
+ (THR << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0) + 0x0000FFF0)
+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0) + 0x0000FFF8)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000) /* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000) /* Set for read */
+#define META_CR_TXUXXRXRQ_TX_S (12)
+#define META_CR_TXUXXRXRQ_RX_S (4)
+#define META_CR_TXUXXRXRQ_UXX_S (0)
+
+#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */
+#define META_CR_TXUD0_ID (0x1) /* Data unit regs */
+#define META_CR_TXUD1_ID (0x2) /* Data unit regs */
+#define META_CR_TXUA0_ID (0x3) /* Address unit regs */
+#define META_CR_TXUA1_ID (0x4) /* Address unit regs */
+#define META_CR_TXUPC_ID (0x5) /* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit) (((Thr) << META_CR_TXUXXRXRQ_TX_S ) | \
+ ((RegNum) << META_CR_TXUXXRXRQ_RX_S ) | \
+ ((Unit) << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
+
+#define META_CR_COREREG_ENABLE (0x0000000)
+#define META_CR_COREREG_STATUS (0x0000010)
+#define META_CR_COREREG_DEFR (0x00000A0)
+#define META_CR_COREREG_PRIVEXT (0x00000E8)
+
+#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001) /* Set if running */
+#define META_CR_TXSTATUS_PRIV (0x00020000)
+#define META_CR_TXPRIVEXT_MINIM (0x00000080)
+
+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000)
+
+
+/************************************************************************
+* META LDR Format
+************************************************************************/
+/* Block header structure */
+typedef struct
+{
+ IMG_UINT32 ui32DevID;
+ IMG_UINT32 ui32SLCode;
+ IMG_UINT32 ui32SLData;
+ IMG_UINT16 ui16PLCtrl;
+ IMG_UINT16 ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* High level data stream block structure */
+typedef struct
+{
+ IMG_UINT16 ui16Cmd;
+ IMG_UINT16 ui16Length;
+ IMG_UINT32 ui32Next;
+ IMG_UINT32 aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* High level data stream block structure */
+typedef struct
+{
+ IMG_UINT16 ui16Tag;
+ IMG_UINT16 ui16Length;
+ IMG_UINT32 aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+ IMG_UINT32 ui32Type;
+ IMG_UINT32 aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010)
+#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0)
+
+/* Command definitions
+ Value Name Description
+ 0 LoadMem Load memory with binary data.
+ 1 LoadCore Load a set of core registers.
+ 2 LoadMMReg Load a set of memory mapped registers.
+ 3 StartThreads Set each thread PC and SP, then enable threads.
+ 4 ZeroMem Zeros a memory region.
+ 5 Config Perform a configuration command. */
+#define RGX_META_LDR_CMD_MASK (0x000F)
+
+#define RGX_META_LDR_CMD_LOADMEM (0x0000)
+#define RGX_META_LDR_CMD_LOADCORE (0x0001)
+#define RGX_META_LDR_CMD_LOADMMREG (0x0002)
+#define RGX_META_LDR_CMD_START_THREADS (0x0003)
+#define RGX_META_LDR_CMD_ZEROMEM (0x0004)
+#define RGX_META_LDR_CMD_CONFIG (0x0005)
+
+/* Config Command definitions
+ Value Name Description
+ 0 Pause Pause for x times 100 instructions.
+ 1 Read Read a value from a register - no value return needed.
+ Utilises the effects of issuing reads to certain registers.
+ 2 Write Write to a memory location.
+ 3 MemSet Set memory to a value.
+ 4 MemCheck Check memory for a specific value. */
+#define RGX_META_LDR_CFG_PAUSE (0x0000)
+#define RGX_META_LDR_CFG_READ (0x0001)
+#define RGX_META_LDR_CFG_WRITE (0x0002)
+#define RGX_META_LDR_CFG_MEMSET (0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK (0x0004)
+
+
+/************************************************************************
+* RGX FW segmented MMU definitions
+************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS (0xf << 8)
+/* Writable */
+#define RGXFW_SEGMMU_WRITEABLE (0x1 << 1)
+/* All threads can access and writable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map region 11 used for mapping GPU memory */
+#define RGXFW_SEGMMU_DMAP_GPU_ID (11)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07800000U)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_DATA_ID (1)
+#define RGXFW_SEGMMU_BOOTLDR_ID (2)
+#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID)
+
+#define RGXFW_SEGMMU_META_DM_ID (0x7)
+
+#if defined(HW_ERN_45914)
+/* SLC caching strategy is emitted through the segment MMU. All the segments configured
+ through this macro are CACHED in the SLC. The interface has been kept the same to
+ simplify the code changes. The bifdm argument is ignored (no longer relevant). */
+#if defined(HW_ERN_49144)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7(pers, coheren, mmu_ctx) ( (((IMG_UINT64) ((pers) & 0x3)) << 50) | \
+ (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 42) | \
+ (((IMG_UINT64) ((coheren) & 0x1)) << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7(0x0, 0x1, mmu_ctx)
+
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP(mmu_ctx, bifdm) RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx | (bifdm&0x0))
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7(pers, coheren, mmu_ctx) ( (((IMG_UINT64) ((pers) & 0x3)) << 52) | \
+ (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \
+ (((IMG_UINT64) ((coheren) & 0x1)) << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7(0x0, 0x1, mmu_ctx)
+
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP(mmu_ctx, bifdm) RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx | (bifdm&0x0))
+#endif
+#else
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten accesses through this segment */
+#define RGXFW_SEGMMU_OUTADDR_TOP(pc, bifdm) ( (((IMG_UINT64) ((pc) & 0xF)) << 44) | \
+ (((IMG_UINT64) ((bifdm) & 0xF)) << 40) )
+#endif
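+
+/* Worked example (illustrative only): with the non-ERN_45914 form above,
+ * RGXFW_SEGMMU_OUTADDR_TOP(0x2, RGXFW_SEGMMU_META_DM_ID) evaluates to
+ * ((IMG_UINT64)0x2 << 44) | ((IMG_UINT64)0x7 << 40), i.e. Page Catalog 2 and
+ * BIF-DM 7 packed into the top bits of the segment output address. */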
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN (0x1000)
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA1(n) (0x0485000C + (n)*0x10)
+
+/* The following defines must be recalculated if the Meta MMU segments
+ * used to access Host-FW data are changed
+ * Current combinations are:
+ * - SLC uncached, META cached, FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached, META cached, FW base address 0x10000000
+ * - SLC cached, META uncached, FW base address 0x90000000
+ */
+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000)
+#define RGXFW_SEGMMU_DATA_META_CACHED (0x0)
+#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000
+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT)
+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected
+ * in the PTEs for the FW data, not in the Meta Segment MMU,
+ * which means these defines have no real effect in those cases */
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000)
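+
+/* Worked example (illustrative only): the base addresses listed above follow
+ * from ORing these defines together, e.g.
+ *   RGXFW_SEGMMU_DATA_BASE_ADDRESS | RGXFW_SEGMMU_DATA_META_UNCACHED |
+ *   RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED
+ *     = 0x10000000 | 0x80000000 | 0x60000000 = 0xF0000000
+ * matching the "SLC uncached, META uncached" case, while
+ *   RGXFW_SEGMMU_DATA_BASE_ADDRESS | RGXFW_SEGMMU_DATA_META_CACHED |
+ *   RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED = 0x10000000
+ * matches the "SLC cached, META cached" case. */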
+
+
+/************************************************************************
+* RGX FW RGX MMU definitions
+************************************************************************/
+#if defined(RGX_FEATURE_SLC_VIVT) && defined(SUPPORT_TRUSTED_DEVICE)
+
+#define META_MMU_CONTEXT_MAPPING (0x1) /* fw data */
+#define META_MMU_CONTEXT_MAPPING_CODE (0x0) /* fw code */
+
+#else
+
+#define META_MMU_CONTEXT_MAPPING (0x0)
+
+#endif
+
+/************************************************************************
+* RGX FW Bootloader defaults
+************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR (0x40000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000)
+
+/* Bootloader configuration offset is in dwords (512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET (0x80)
+
+
+/************************************************************************
+* RGX META Stack
+************************************************************************/
+#define RGX_META_STACK_SIZE (0x1000)
+
+/************************************************************************
+* RGX META Core memory
+************************************************************************/
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ #define RGX_META_COREMEM_STACK_SIZE (RGX_META_STACK_SIZE*2)
+ #define RGX_META_COREMEM_BSS_SIZE (0xF40)
+ #if defined(RGX_FEATURE_META_DMA)
+ #define RGX_META_COREMEM_CCBBUF_SIZE (0x2E0)
+ #define RGX_META_COREMEM_DATA_SIZE (RGX_META_COREMEM_CCBBUF_SIZE + RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_STACK_SIZE)
+ #else
+ #define RGX_META_COREMEM_DATA_SIZE (RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_STACK_SIZE)
+ #endif
+#else
+ #define RGX_META_COREMEM_STACK_SIZE (RGX_META_STACK_SIZE)
+ #define RGX_META_COREMEM_BSS_SIZE (0xB00)
+ #if defined(RGX_FEATURE_META_DMA)
+ #define RGX_META_COREMEM_CCBBUF_SIZE (0x2E0)
+ #define RGX_META_COREMEM_DATA_SIZE (RGX_META_COREMEM_CCBBUF_SIZE + RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_STACK_SIZE)
+ #else
+ #define RGX_META_COREMEM_DATA_SIZE (RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_STACK_SIZE)
+ #endif
+#endif
+
+#if defined (RGX_META_COREMEM_CODE)
+#define RGX_META_COREMEM_CODE_SIZE (RGX_META_COREMEM_SIZE - RGX_META_COREMEM_DATA_SIZE)
+#endif
+
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR (0x80000000)
+#define RGX_META_COREMEM_DATA_ADDR (0x82000000)
+/* Because data and code share the same memory, the base address for code is offset by the data size */
+#define RGX_META_COREMEM_CODE_BADDR (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_DATA_SIZE)
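+/* Worked example (illustrative only): in a single-thread build without
+ * RGX_FEATURE_META_DMA, RGX_META_COREMEM_DATA_SIZE = 0xB00 + 0x1000 = 0x1B00,
+ * so RGX_META_COREMEM_CODE_BADDR = 0x80000000 + 0x1B00 = 0x80001B00. */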
+
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ #define RGX_META_COREMEM_STACK_ADDR (RGX_META_COREMEM_DATA_ADDR)
+ #define RGX_META_COREMEM_2ND_STACK_ADDR (RGX_META_COREMEM_DATA_ADDR + RGX_META_STACK_SIZE)
+ #define RGX_META_COREMEM_BSS_ADDR (RGX_META_COREMEM_2ND_STACK_ADDR + RGX_META_STACK_SIZE)
+#else
+ #define RGX_META_COREMEM_STACK_ADDR (RGX_META_COREMEM_DATA_ADDR)
+ #define RGX_META_COREMEM_BSS_ADDR (RGX_META_COREMEM_STACK_ADDR + RGX_META_STACK_SIZE)
+#endif
+
+#if defined(RGX_FEATURE_META_DMA)
+ #define RGX_META_COREMEM_CCBBUF_ADDR (RGX_META_COREMEM_BSS_ADDR + RGX_META_COREMEM_BSS_SIZE)
+#endif
+
+#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B))))
+#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B))))
+
+/************************************************************************
+* 2nd thread
+************************************************************************/
+#define RGXFW_THR1_PC (0x18930000)
+#define RGXFW_THR1_SP (0x78890000)
+
+/************************************************************************
+* META compatibility
+************************************************************************/
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU)
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+
+ #if (RGX_FEATURE_META == MTP218)
+ #define RGX_CR_META_CORE_ID_VALUE 0x19
+ #elif (RGX_FEATURE_META == MTP219)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1E
+ #elif (RGX_FEATURE_META == LTP218)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1C
+ #elif (RGX_FEATURE_META == LTP217)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1F
+ #else
+ #error "Unknown META ID"
+ #endif
+#else
+
+ #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19
+ #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E
+ #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C
+ #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#endif
+#define RGXFW_PROCESSOR_META "META"
+
+
+#endif /* __RGX_META_H__ */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_mips.h
+@Title
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform RGX
+@Description RGX MIPS definitions, user space
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_MIPS_H__)
+#define __RGX_MIPS_H__
+
+/*
+ * Utility defines for memory management
+ */
+#define RGXMIPSFW_LOG2_PAGE_SIZE (12)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16)
+/* Page size */
+#define RGXMIPSFW_PAGE_SIZE (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_PAGE_MASK (RGXMIPSFW_PAGE_SIZE - 1)
+#define RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE (15)
+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* Page mask MIPS register setting for bigger pages */
+#define RGXMIPSFW_PAGE_MASK_16K (0x00007800)
+#define RGXMIPSFW_PAGE_MASK_64K (0x0001F800)
+/* Page Frame Number of the entry lo */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6)
+/* Dirty Valid And Global bits in entry lo */
+#define RGXMIPSFW_ENTRYLO_DVG_MASK (0x00000007)
+/* Dirty Valid And Global bits + caching policy in entry lo */
+#define RGXMIPSFW_ENTRYLO_DVGC_MASK (0x0000003F)
+/* Total number of TLB entries */
+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+
+
+/*
+ * Firmware physical layout
+ */
+#define RGXMIPSFW_CODE_BASE_PAGE (0x0)
+#define RGXMIPSFW_CODE_OFFSET (RGXMIPSFW_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#if defined(SUPPORT_TRUSTED_DEVICE)
+/* Clean way of getting a 256K allocation (62 + 1 + 1 pages) without using too many ifdefs */
+/* This will need to be changed if the non-secure builds reach this number of pages */
+#define RGXMIPSFW_CODE_NUMPAGES (62)
+#else
+#define RGXMIPSFW_CODE_NUMPAGES (38)
+#endif
+#define RGXMIPSFW_CODE_SIZE (RGXMIPSFW_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE (RGXMIPSFW_CODE_BASE_PAGE + RGXMIPSFW_CODE_NUMPAGES)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES (1)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_SIZE (RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE + RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_CODE_OFFSET (RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES (1)
+#define RGXMIPSFW_BOOT_NMI_CODE_SIZE (RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+
+#define RGXMIPSFW_DATA_BASE_PAGE (0x0)
+#define RGXMIPSFW_DATA_OFFSET (RGXMIPSFW_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_DATA_NUMPAGES (22)
+#define RGXMIPSFW_DATA_SIZE (RGXMIPSFW_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE (RGXMIPSFW_DATA_BASE_PAGE + RGXMIPSFW_DATA_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_DATA_OFFSET (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES (1)
+#define RGXMIPSFW_BOOT_NMI_DATA_SIZE (RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_STACK_BASE_PAGE (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE + RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES)
+#define RGXMIPSFW_STACK_OFFSET (RGXMIPSFW_STACK_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_STACK_NUMPAGES (1)
+#define RGXMIPSFW_STACK_SIZE (RGXMIPSFW_STACK_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ * - (benign trampoline) : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2)
+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) ((a) - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1<<RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1) & (a)))
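+/*
+ * Illustrative worked example (derived from the defines in this file, not new
+ * driver logic): RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE is 2 + 12 = 14, so the
+ * macro masks off the low 14 bits of the address:
+ *   RGXMIPSFW_SENSITIVE_ADDR(0x1FC02345) -> 0x1FC00000 == 0x1FC00000 -> true
+ *   RGXMIPSFW_SENSITIVE_ADDR(0x1FC04000) -> 0x1FC04000 == 0x1FC00000 -> false
+ * i.e. any physical address falling inside the 16KB trampoline segment based
+ * at RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN is treated as sensitive.
+ */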
+
+/*
+ * Firmware virtual layout and remap configuration
+ */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address corresponding to the virtual base address,
+ * used as input address and remapped to the actual physical address
+ * - log2 of the size of the region remapped by the MIPS wrapper, i.e. the number of bits
+ * from the bottom of the base input address that survive onto the output address
+ * (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region
+ */
+
+/* Boot remap setup */
+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000)
+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup */
+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000)
+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup */
+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000)
+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Fixed TLB setup */
+#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000)
+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF400000)
+#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+/* The extra fixed TLB entries are used in security builds for the FW code */
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB (5)
+#else
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB (3)
+#endif
+
+/* Firmware heap setup */
+#define RGXMIPSFW_FIRMWARE_HEAP_BASE (0xC0000000)
+#define RGXMIPSFW_CODE_VIRTUAL_BASE (RGXMIPSFW_FIRMWARE_HEAP_BASE)
+/* The data virtual base takes into account the exception vectors page
+ * and the boot code page mapped in the FW heap together with the FW code
+ * (we can only map the Firmware code allocation as a whole) */
+#define RGXMIPSFW_DATA_VIRTUAL_BASE (RGXMIPSFW_CODE_VIRTUAL_BASE + RGXMIPSFW_CODE_SIZE + \
+ RGXMIPSFW_EXCEPTIONSVECTORS_SIZE + RGXMIPSFW_BOOT_NMI_CODE_SIZE)
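+/*
+ * Illustrative worked example (derived from the defines above): in a
+ * non-secure build RGXMIPSFW_CODE_SIZE is 38 pages (0x26000 bytes), so
+ *   RGXMIPSFW_DATA_VIRTUAL_BASE = 0xC0000000 + 0x26000 + 0x1000 + 0x1000
+ *                               = 0xC0028000
+ * whereas a SUPPORT_TRUSTED_DEVICE build (62 code pages) gives 0xC0040000.
+ */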
+
+
+/*
+ * Bootloader configuration data
+ */
+/* Bootloader configuration offset within the bootloader/NMI data page */
+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0)
+/* Offsets of bootloader configuration parameters in 64-bit words */
+#define RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET (0x0)
+#define RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET (0x1)
+#define RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET (0x2)
+#define RGXMIPSFW_RESERVED_FUTURE_OFFSET (0x3)
+#define RGXMIPSFW_FWINIT_VIRTADDR_OFFSET (0x4)
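+/*
+ * Minimal sketch (hypothetical host-side code, not part of this file) showing
+ * how the offsets above index the bootloader configuration; pvBootDataPage,
+ * pui64BootConf and the s*PhyAddr/sFWInitFWAddr names are assumed local
+ * variables:
+ *
+ *   IMG_UINT64 *pui64BootConf =
+ *       (IMG_UINT64 *)((IMG_UINT8 *)pvBootDataPage + RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+ *   pui64BootConf[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = sRegsPhyAddr.uiAddr;
+ *   pui64BootConf[RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET] = sPTPhyAddr.uiAddr;
+ *   pui64BootConf[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET]    = sStackPhyAddr.uiAddr;
+ *   pui64BootConf[RGXMIPSFW_FWINIT_VIRTADDR_OFFSET]         = sFWInitFWAddr.ui32Addr;
+ */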
+
+/*
+ * MIPS Fence offset in the bootloader/NMI data page
+ */
+#define RGXMIPSFW_FENCE_OFFSET (0x80)
+
+/*
+ * NMI shared data
+ */
+/* Base address of the shared data within the bootloader/NMI data page */
+#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100)
+/* Size used by Debug dump data */
+#define RGXMIPSFW_NMI_SHARED_SIZE (0x128)
+/* Offsets in the NMI shared area in 32-bit words */
+#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
+#define RGXMIPSFW_NMI_STATE_OFFSET (0x1)
+
+/*
+ * MIPS fault data
+ */
+/* Base address of the fault data within the bootloader/NMI data page */
+#define RGXMIPSFW_FAULT_DATA_BASE (0x280)
+
+/* The definitions below are excluded when compiling assembly sources */
+#if !defined (RGXMIPSFW_ASSEMBLY_CODE)
+#include "img_types.h"
+#include "km/rgxdefs_km.h"
+
+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(IMG_UINT32))
+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(IMG_UINT64))
+
+/* Used for compatibility checks */
+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU)
+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT (10U)
+#define RGXMIPSFW_CORE_ID_VALUE (0x001U)
+#define RGXFW_PROCESSOR_MIPS "MIPS"
+
+/* microAptivAP cache line size */
+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U)
+
+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */
+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U)
+
+/* Values to put in the MIPS selectors for performance counters */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) /* Icache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) /* Icache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) /* Dcache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) /* Dcache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) /* ITLB instruction accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) /* JTLB instruction access misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) /* Instructions completed in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) /* JTLB data misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) /* Shift for the Event field in the MIPS perf ctrl registers */
+/* Additional flags for performance counters. See MIPS manual for further reference */
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U)
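+/*
+ * Minimal sketch (illustrative; the actual register programming lives in the
+ * firmware/driver code, not here): a performance control value selecting
+ * Icache accesses on counter 0 and counting in kernel mode would typically be
+ * composed as
+ *   (RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0
+ *        << RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT) |
+ *   RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE
+ * which evaluates to (9 << 5) | 2 = 0x122.
+ */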
+
+
+#define RGXMIPSFW_C0_NBHWIRQ 8
+
+/* Macros to decode C0_Cause register */
+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7c) >> 2)
+/* Use only when Coprocessor Unusable exception */
+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3)
+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10)
+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1 << 21)
+#define RGXMIPSFW_C0_CAUSE_IV (1 << 23)
+#define RGXMIPSFW_C0_CAUSE_IC (1 << 25)
+#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1 << 26)
+#define RGXMIPSFW_C0_CAUSE_TIPENDING (1 << 30)
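+/*
+ * Illustrative decode example (derived from the macros above): for a Cause
+ * value of 0x00800008,
+ *   RGXMIPSFW_C0_CAUSE_EXCCODE(0x00800008)       = 2   (a TLB load/fetch exception on MIPS)
+ *   RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(0x00800008) = 0   (no hardware IRQ pending)
+ *   (0x00800008 & RGXMIPSFW_C0_CAUSE_IV)         != 0  (special interrupt vector in use)
+ */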
+
+/* Macros to decode C0_Debug register */
+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f)
+#define RGXMIPSFW_C0_DEBUG_DSS (1 << 0)
+#define RGXMIPSFW_C0_DEBUG_DBP (1 << 1)
+#define RGXMIPSFW_C0_DEBUG_DDBL (1 << 2)
+#define RGXMIPSFW_C0_DEBUG_DDBS (1 << 3)
+#define RGXMIPSFW_C0_DEBUG_DIB (1 << 4)
+#define RGXMIPSFW_C0_DEBUG_DINT (1 << 5)
+#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1 << 6)
+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1 << 18)
+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1 << 19)
+#define RGXMIPSFW_C0_DEBUG_IEXI (1 << 20)
+#define RGXMIPSFW_C0_DEBUG_DBUSEP (1 << 21)
+#define RGXMIPSFW_C0_DEBUG_CACHEEP (1 << 22)
+#define RGXMIPSFW_C0_DEBUG_MCHECKP (1 << 23)
+#define RGXMIPSFW_C0_DEBUG_IBUSEP (1 << 24)
+#define RGXMIPSFW_C0_DEBUG_DM (1 << 30)
+#define RGXMIPSFW_C0_DEBUG_DBD (1 << 31)
+
+/* ELF format defines */
+#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */
+#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */
+#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */
+#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string tables in the firmware ELF file */
+
+
+/* Redefined structs of ELF format */
+typedef struct
+{
+ IMG_UINT8 ui32Eident[16];
+ IMG_UINT16 ui32Etype;
+ IMG_UINT16 ui32Emachine;
+ IMG_UINT32 ui32Eversion;
+ IMG_UINT32 ui32Eentry;
+ IMG_UINT32 ui32Ephoff;
+ IMG_UINT32 ui32Eshoff;
+ IMG_UINT32 ui32Eflags;
+ IMG_UINT16 ui32Eehsize;
+ IMG_UINT16 ui32Ephentsize;
+ IMG_UINT16 ui32Ephnum;
+ IMG_UINT16 ui32Eshentsize;
+ IMG_UINT16 ui32Eshnum;
+ IMG_UINT16 ui32Eshtrndx;
+} RGX_MIPS_ELF_HDR;
+
+
+typedef struct
+{
+ IMG_UINT32 ui32Stname;
+ IMG_UINT32 ui32Stvalue;
+ IMG_UINT32 ui32Stsize;
+ IMG_UINT8 ui32Stinfo;
+ IMG_UINT8 ui32Stother;
+ IMG_UINT16 ui32Stshndx;
+} RGX_MIPS_ELF_SYM;
+
+
+typedef struct
+{
+ IMG_UINT32 ui32Shname;
+ IMG_UINT32 ui32Shtype;
+ IMG_UINT32 ui32Shflags;
+ IMG_UINT32 ui32Shaddr;
+ IMG_UINT32 ui32Shoffset;
+ IMG_UINT32 ui32Shsize;
+ IMG_UINT32 ui32Shlink;
+ IMG_UINT32 ui32Shinfo;
+ IMG_UINT32 ui32Shaddralign;
+ IMG_UINT32 ui32Shentsize;
+} RGX_MIPS_ELF_SHDR;
+
+typedef struct
+{
+ IMG_UINT32 ui32Ptype;
+ IMG_UINT32 ui32Poffset;
+ IMG_UINT32 ui32Pvaddr;
+ IMG_UINT32 ui32Ppaddr;
+ IMG_UINT32 ui32Pfilesz;
+ IMG_UINT32 ui32Pmemsz;
+ IMG_UINT32 ui32Pflags;
+ IMG_UINT32 ui32Palign;
+} RGX_MIPS_ELF_PROGRAM_HDR;
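+/*
+ * Minimal parsing sketch (hypothetical, not part of this file): walking the
+ * program headers of the firmware image, where pui8Elf is an assumed pointer
+ * to the start of the ELF image in memory.
+ *
+ *   RGX_MIPS_ELF_HDR *psElfHdr = (RGX_MIPS_ELF_HDR *)pui8Elf;
+ *   IMG_UINT32 i;
+ *   for (i = 0; i < psElfHdr->ui32Ephnum; i++)
+ *   {
+ *       RGX_MIPS_ELF_PROGRAM_HDR *psPrgHdr = (RGX_MIPS_ELF_PROGRAM_HDR *)
+ *           (pui8Elf + psElfHdr->ui32Ephoff + i * psElfHdr->ui32Ephentsize);
+ *       if (psPrgHdr->ui32Ptype == ELF_PT_LOAD)
+ *       {
+ *           load ui32Pfilesz bytes from offset ui32Poffset to address ui32Pvaddr
+ *       }
+ *   }
+ */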
+
+#define RGXMIPSFW_TLB_GET_MASK(ENTRY_PAGE_MASK) (((ENTRY_PAGE_MASK) >> 13) & 0xffffU)
+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13)
+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U)
+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0xfffffU)
+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U)
+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U)
+#define RGXMIPSFW_TLB_GLOBAL (1U)
+#define RGXMIPSFW_TLB_VALID (1U << 1)
+#define RGXMIPSFW_TLB_DIRTY (1U << 2)
+#define RGXMIPSFW_TLB_XI (1U << 30)
+#define RGXMIPSFW_TLB_RI (1U << 31)
+
+typedef struct {
+ IMG_UINT32 ui32TLBPageMask;
+ IMG_UINT32 ui32TLBHi;
+ IMG_UINT32 ui32TLBLo0;
+ IMG_UINT32 ui32TLBLo1;
+} RGX_MIPS_TLB_ENTRY;
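+/*
+ * Minimal decode sketch (hypothetical debug helper, not part of this file),
+ * showing how the RGXMIPSFW_TLB_GET_* macros apply to a captured entry:
+ *
+ *   static void RGXDumpTLBEntry(const RGX_MIPS_TLB_ENTRY *psEntry)
+ *   {
+ *       IMG_UINT32 ui32VPN2  = RGXMIPSFW_TLB_GET_VPN2(psEntry->ui32TLBHi);
+ *       IMG_UINT32 ui32PFN0  = RGXMIPSFW_TLB_GET_PFN(psEntry->ui32TLBLo0);
+ *       IMG_BOOL   bValid0   = (psEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) != 0;
+ *       IMG_BOOL   bGlobal0  = (psEntry->ui32TLBLo0 & RGXMIPSFW_TLB_GLOBAL) != 0;
+ *       PVR_DPF((PVR_DBG_MESSAGE, "VPN2 %08x PFN0 %05x V %u G %u",
+ *                ui32VPN2, ui32PFN0, bValid0, bGlobal0));
+ *   }
+ */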
+
+typedef struct {
+ IMG_UINT32 ui32ErrorEPC;
+ IMG_UINT32 ui32StatusRegister;
+ IMG_UINT32 ui32CauseRegister;
+ IMG_UINT32 ui32BadRegister;
+ IMG_UINT32 ui32EPC;
+ IMG_UINT32 ui32SP;
+ IMG_UINT32 ui32Debug;
+ IMG_UINT32 ui32DEPC;
+ IMG_UINT32 ui32BadInstr;
+ RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
+} RGX_MIPS_STATE;
+
+typedef struct {
+ IMG_UINT32 ui32FaultPageEntryLo;
+ IMG_UINT32 ui32BadVAddr;
+ IMG_UINT32 ui32EntryLo0;
+ IMG_UINT32 ui32EntryLo1;
+} RGX_MIPS_FAULT_DATA;
+
+#endif /* RGXMIPSFW_ASSEMBLY_CODE */
+
+
+#endif /* __RGX_MIPS_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX build options
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which
+ * provides up to log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM
+ * and (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option
+ * was enabled at compile time.
+ *
+ * To extract the enabled build flags, enable the INTERNAL_TEST
+ * switch in a client program that includes this header. The client
+ * can then test a specific build flag by reading the bit value at
+ * ##OPTIONNAME##_SET_OFFSET in RGX_BUILD_OPTIONS_KM or
+ * RGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits, or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2), so that the
+ * bitfield remains backwards compatible.
+ */
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
+#if defined(NO_HARDWARE) || defined (INTERNAL_TEST)
+ #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0
+ #define OPTIONS_BIT0 (0x1ul << 0)
+ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT0 0x0
+#endif /* NO_HARDWARE */
+
+
+#if defined(PDUMP) || defined (INTERNAL_TEST)
+ #define PDUMP_SET_OFFSET OPTIONS_BIT1
+ #define OPTIONS_BIT1 (0x1ul << 1)
+ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT1 0x0
+#endif /* PDUMP */
+
+
+#if defined (INTERNAL_TEST)
+ #define UNUSED_SET_OFFSET OPTIONS_BIT2
+ #define OPTIONS_BIT2 (0x1ul << 2)
+ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT2 0x0
+#endif /* INTERNAL_TEST */
+
+
+#if defined(SUPPORT_MMU_FREELIST) || defined (INTERNAL_TEST)
+ #define SUPPORT_MMU_FREELIST_SET_OFFSET OPTIONS_BIT3
+ #define OPTIONS_BIT3 (0x1ul << 3)
+ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT3 0x0
+#endif /* SUPPORT_MMU_FREELIST */
+
+
+#if defined(SUPPORT_RGX) || defined (INTERNAL_TEST)
+ #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4
+ #define OPTIONS_BIT4 (0x1ul << 4)
+ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT4 0x0
+#endif /* SUPPORT_RGX */
+
+
+#if defined(SUPPORT_SECURE_EXPORT) || defined (INTERNAL_TEST)
+ #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5
+ #define OPTIONS_BIT5 (0x1ul << 5)
+ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT5 0x0
+#endif /* SUPPORT_SECURE_EXPORT */
+
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined (INTERNAL_TEST)
+ #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6
+ #define OPTIONS_BIT6 (0x1ul << 6)
+ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT6 0x0
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+
+#if defined(SUPPORT_VFP) || defined (INTERNAL_TEST)
+ #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7
+ #define OPTIONS_BIT7 (0x1ul << 7)
+ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT7 0x0
+#endif /* SUPPORT_VFP */
+
+
+#if defined(DEBUG) || defined (INTERNAL_TEST)
+ #define DEBUG_SET_OFFSET OPTIONS_BIT10
+ #define OPTIONS_BIT10 (0x1ul << 10)
+ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT10 0x0
+#endif /* DEBUG */
+/* The bit position of this should be the
+ * same as DEBUG_SET_OFFSET option when
+ * defined */
+#define OPTIONS_DEBUG_MASK (0x1ul << 10)
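+/* Minimal usage sketch (illustrative): a client built with INTERNAL_TEST
+ * defined (so the *_SET_OFFSET names exist) can test individual options in a
+ * reported options word, e.g.
+ *   (ui32KMBuildOptions & OPTIONS_DEBUG_MASK)  - KM was built with DEBUG
+ *   (ui32KMBuildOptions & PDUMP_SET_OFFSET)    - KM was built with PDUMP
+ * where ui32KMBuildOptions is an assumed copy of the RGX_BUILD_OPTIONS_KM
+ * value reported by the other side of a compatibility check.
+ */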
+
+#define RGX_BUILD_OPTIONS_KM \
+ (OPTIONS_BIT0 |\
+ OPTIONS_BIT1 |\
+ OPTIONS_BIT2 |\
+ OPTIONS_BIT3 |\
+ OPTIONS_BIT4 |\
+ OPTIONS_BIT6 |\
+ OPTIONS_BIT7 |\
+ OPTIONS_BIT10)
+
+
+#if defined(SUPPORT_PERCONTEXT_FREELIST) || defined (INTERNAL_TEST)
+ #define OPTIONS_BIT31 (0x1ul << 31)
+ #if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit is within the KM reserved range"
+ #endif
+ #define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31
+#else
+ #define OPTIONS_BIT31 0x0
+#endif /* SUPPORT_PERCONTEXT_FREELIST */
+
+#define _KM_RGX_BUILD_OPTIONS_ RGX_BUILD_OPTIONS
+
+#define RGX_BUILD_OPTIONS \
+ (RGX_BUILD_OPTIONS_KM |\
+ OPTIONS_BIT31)
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX PDump panic definitions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX PDump panic definitions header
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+
+/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+ RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+ /* These panics occur when test parameters and driver configuration
+ * enable features that require the firmware and host driver to
+ * communicate. Such features are not supported with off-line playback.
+ */
+ RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+ RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */
+ RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */
+ RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+ RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */
+} RGX_PDUMP_PANIC;
+
+
+#endif /* RGX_PDUMP_PANICS_H_ */
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX transfer queue shared
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared definitions between client and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGX_TQ_SHARED_H__
+#define __RGX_TQ_SHARED_H__
+
+#define TQ_MAX_PREPARES_PER_SUBMIT 16
+
+#define TQ_PREP_FLAGS_COMMAND_3D 0x0
+#define TQ_PREP_FLAGS_COMMAND_2D 0x1
+#define TQ_PREP_FLAGS_COMMAND_MASK (0xf)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT 0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1 << 4)
+#define TQ_PREP_FLAGS_START (1 << 5)
+#define TQ_PREP_FLAGS_END (1 << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+ ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+ ((((m) & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n)
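+
+/*
+ * Usage sketch (illustrative; ui32PrepFlags is an assumed local variable): a
+ * single 2D prepare submitted on its own might set
+ *   ui32PrepFlags = TQ_PREP_FLAGS_COMMAND_SET(2D) |
+ *                   TQ_PREP_FLAGS_START | TQ_PREP_FLAGS_END;
+ * and the command type can later be tested with
+ *   TQ_PREP_FLAGS_COMMAND_IS(ui32PrepFlags, 2D)
+ */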
+
+#endif /* __RGX_TQ_SHARED_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX API Header kernel mode
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exported RGX API details
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXAPI_KM_H__
+#define __RGXAPI_KM_H__
+
+#if defined(SUPPORT_SHARED_SLC) && !defined(PVRSRV_GPUVIRT_GUESTDRV)
+/*!
+******************************************************************************
+
+ @Function RGXInitSLC
+
+ @Description Initialise the SLC after a power up. This function must be
+ called when SUPPORT_SHARED_SLC is in use; otherwise it must
+ not be called.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#include "rgx_hwperf_km.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfLazyConnect
+@Description Obtain a connection object to the RGX device. The connection
+ is not actually opened until RGXHWPerfOpen() is called.
+@Output phDevData Address of a handle to a connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(
+ IMG_HANDLE* phDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfOpen
+@Description Opens a connection to the RGX device. A valid handle to the
+ connection object must be provided, which means this call
+ must be preceded by a call to RGXHWPerfLazyConnect().
+@Input hDevData Handle to a connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(
+ IMG_HANDLE hDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConnect
+@Description Obtain a connection object to the RGX device. The allocated
+ connection object references an opened connection.
+ Calling this function is equivalent to calling
+ RGXHWPerfLazyConnect() followed by RGXHWPerfOpen().
+@Output phDevData Address of a handle to a connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(
+ IMG_HANDLE* phDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfFreeConnection
+@Description Frees the connection object to the RGX device
+@Input hDevData Handle to connection object as returned from
+ RGXHWPerfLazyConnect()
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(
+ IMG_HANDLE hDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfClose
+@Description Disconnect from the RGX device
+@Input hDevData Handle to connection object as returned from
+ RGXHWPerfConnect() or RGXHWPerfOpen()
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(
+ IMG_HANDLE hDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfDisconnect
+@Description Disconnect from the RGX device
+@Input hDevData Handle to connection object as returned from
+ RGXHWPerfConnect() or RGXHWPerfOpen().
+ Calling this function is equivalent to calling
+ RGXHWPerfClose() followed by RGXHWPerfFreeConnection().
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(
+ IMG_HANDLE hDevData);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfControl
+@Description Enable or disable the generation of RGX HWPerf event packets.
+ See RGXCtrlHWPerf().
+@Input hDevData Handle to connection object
+@Input eStreamId ID of the HWPerf stream to control
+@Input bToggle Switch to toggle or apply mask.
+@Input ui64Mask Mask of events to control.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConfigureAndEnableCounters
+@Description Enable and configure the performance counter block for
+ one or more device layout modules.
+ See RGXConfigureAndEnableHWPerfCounters().
+@Input hDevData Handle to connection object
+@Input ui32NumBlocks Number of elements in the array
+@Input asBlockConfigs Address of the array of configuration blocks
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfDisableCounters
+@Description Disable the performance counter block for one or more
+ device layout modules. See RGXDisableHWPerfCounters().
+@Input hDevData Handle to connection/device object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of bytes with values taken from
+ the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfEnableCounters
+@Description Enable the performance counter block for one or more
+ device layout modules. See RGXEnableHWPerfCounters().
+@Input hDevData Handle to connection/device object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of bytes with values taken from
+ the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a
+ * single connection/stream is not shared between multiple execution contexts,
+ * e.g. between a kernel thread and an ISR handler. It is the client's
+ * responsibility to ensure this API is not interrupted by a higher priority
+ * thread/ISR.
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfAcquireData
+@Description When there is data available to read, this call returns the
+ address and length of the data buffer that the
+ client can safely read. This buffer may contain one or more
+ event packets. If no data is available then this call
+ returns OK and sets *pui32BufLen to 0 on exit.
+ Clients must pair this call with an RGXHWPerfReleaseData() call.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output ppBuf Address of a pointer to a byte buffer. On exit
+ it contains the address of buffer to read from
+@Output pui32BufLen Pointer to an integer. On exit it is the size
+ of the data to read from the buffer
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireData(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_PBYTE* ppBuf,
+ IMG_UINT32* pui32BufLen);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfGetFilter
+@Description Reads the HWPerf stream filter for the stream identified by
+ the given stream ID.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output ui64Filter HWPerf filter value
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT64 *ui64Filter
+);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfReleaseData
+@Description Called after the client has read the event data out of the
+ buffer retrieved from the RGXHWPerfAcquireData() call, to release resources.
+@Input hDevData Handle to connection/device object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseData(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId);
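+
+/* Minimal usage sketch of the HWPerf control/retrieval APIs (illustrative
+ * only; error handling is omitted and eStreamId, ui64EventMask and the local
+ * variable names are assumptions):
+ *
+ *   IMG_HANDLE hDev;
+ *   IMG_PBYTE pbData;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   RGXHWPerfConnect(&hDev);
+ *   RGXHWPerfControl(hDev, eStreamId, IMG_FALSE, ui64EventMask);
+ *   for (;;)
+ *   {
+ *       RGXHWPerfAcquireData(hDev, eStreamId, &pbData, &ui32Len);
+ *       if (ui32Len != 0)
+ *       {
+ *           parse the HWPerf packets in pbData[0 .. ui32Len-1]
+ *       }
+ *       RGXHWPerfReleaseData(hDev, eStreamId);
+ *   }
+ *   RGXHWPerfDisconnect(hDev);
+ */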
+
+
+#endif /* __RGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Breakpoint routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Breakpoint routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGXFWIF_DM eFWDataMaster,
+ IMG_UINT32 ui32BPAddr,
+ IMG_UINT32 ui32HandlerAddr,
+ IMG_UINT32 ui32DataMaster)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (psDevInfo->bBPSet == IMG_TRUE)
+ return PVRSRV_ERROR_BP_ALREADY_SET;
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+ sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ eFWDataMaster,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, eFWDataMaster, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXSetBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ return eError;
+ }
+
+ psDevInfo->eBPDM = eFWDataMaster;
+ psDevInfo->bBPSet = IMG_TRUE;
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXClearBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ return eError;
+ }
+
+ psDevInfo->bBPSet = IMG_FALSE;
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (psDevInfo->bBPSet == IMG_FALSE)
+ return PVRSRV_ERROR_BP_NOT_SET;
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXEnableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (psDevInfo->bBPSet == IMG_FALSE)
+ return PVRSRV_ERROR_BP_NOT_SET;
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDisableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32TempRegs,
+ IMG_UINT32 ui32SharedRegs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_REGS;
+ sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+ sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXOverallocateBPRegistersKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXOverallocateBPRegistersKM: Wait for completion aborted with error (%u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX breakpoint functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX breakpoint functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBREAKPOINT_H__)
+#define __RGXBREAKPOINT_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXSetBreakpointKM
+
+ @Description
+ Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - Memory context private data
+ @Input eFWDataMaster - Data Master to schedule the command for
+ @Input ui32BPAddr - Address of breakpoint
+ @Input ui32HandlerAddr - Address of breakpoint handler
+ @Input ui32DataMaster - Data Master on which the breakpoint is set
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGXFWIF_DM eFWDataMaster,
+ IMG_UINT32 ui32BPAddr,
+ IMG_UINT32 ui32HandlerAddr,
+ IMG_UINT32 ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXClearBreakpointKM
+
+ @Description
+ Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXEnableBreakpointKM
+
+ @Description
+ Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDisableBreakpointKM
+
+ @Description
+ Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+ Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32TempRegs,
+ IMG_UINT32 ui32SharedRegs);
+#endif /* __RGXBREAKPOINT_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX CCB routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX CCB routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "dllist.h"
+#include "rgx_fwif_shared.h"
+#include "rgxtimerquery.h"
+#if defined(LINUX)
+#include "trace_events.h"
+#endif
+#include "rgxutils.h"
+
+/*
+ * Defines the number of fence updates to record so that future fences in the CCB
+ * can be checked to see if they are already known to be satisfied.
+ */
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32)
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
+
+typedef struct _RGX_CLIENT_CCB_UTILISATION_
+{
+ /* The threshold in bytes.
+ * When the CCB utilisation hits the threshold a warning message
+ * is printed.
+ */
+ IMG_UINT32 ui32ThresholdBytes;
+ /* Maximum cCCB usage seen so far (high water mark) */
+ IMG_UINT32 ui32HighWaterMark;
+ /* Keeps track of the warnings already printed;
+ * a bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz values
+ */
+ IMG_UINT32 ui32Warnings;
+} RGX_CLIENT_CCB_UTILISATION;
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+struct _RGX_CLIENT_CCB_ {
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */
+ IMG_UINT8 *pui8ClientCCB; /*!< CPU mapping of the CCB */
+ DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */
+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */
+ IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */
+ IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */
+ IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */
+ IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */
+ IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */
+ IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */
+ IMG_UINT32 ui32Size; /*!< Size of the CCB */
+ DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the per connection data in which we reside */
+ void *hTransition; /*!< Handle for Transition callback */
+ IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor;
+ RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */
+#endif
+#if defined(DEBUG)
+ IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */
+ RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */
+#endif
+};
+
+
+/* Forms a table with an array of strings for each requestor type (listed in the RGX_CCB_REQUESTORS X macro), used for
+ DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
+ { "FwClientCCB:" <requestor_name>, "FwClientCCBControl:" <requestor_name>, <requestor_name> },
+ The first string is used as the comment when allocating the ClientCCB for the given requestor, the second for the
+ CCBControl structure, and the third in PDUMP comments. The number of tuples in the table must satisfy the following
+ build assert. */
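+/* For illustration (assuming TA is one of the listed requestors):
+ FORM_REQUESTOR_TUPLE(TA) expands, via REQUESTOR_STRING and C string-literal
+ concatenation, to { "FwClientCCB:TA", "FwClientCCBControl:TA", "TA" }. */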
+IMG_CHAR *const aszCCBRequestors[][3] =
+{
+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
+ RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
+#undef FORM_REQUESTOR_TUPLE
+};
+/* The number of tuples in the above table must always equal the number of entries in the RGX_CCB_REQUESTORS X macro list.
+ If the value of DPX_MAX_RAY_CONTEXTS changes to, say, 'n', the corresponding entries up to FC[n-1] must be added to
+ the RGX_CCB_REQUESTORS list. */
+static_assert((sizeof(aszCCBRequestors)/(3*sizeof(aszCCBRequestors[0][0]))) == (REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1),
+ "Mismatch between aszCCBRequestors table and DPX_MAX_RAY_CONTEXTS");
+
+IMG_EXPORT PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32PDumpFlags)
+{
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
+ psClientCCB->szName,
+ psClientCCB,
+ psClientCCB->ui32LastPDumpWriteOffset);
+
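+ /* Emit a PDump poll that blocks script playback until the firmware read
+ * offset equals the last write offset we captured, i.e. until the simulated
+ * CCB has been fully drained. */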
+ return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ psClientCCB->ui32LastPDumpWriteOffset,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+}
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+
+ /*
+ We're about to Transition into capture range and we've submitted
+ new commands since the last time we entered capture range so drain
+ the CCB as required
+ */
+ if (bInto)
+ {
+ volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+ PVRSRV_ERROR eError;
+
+ /*
+ Wait for the FW to catch up (the retry will get pushed back out to the
+ services client, where we wait on the event object and try again later)
+ */
+ if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /*
+ We drain whenever capture range is entered. Even if no commands
+ have been issued while we were out of capture range, we have to wait for
+ operations that we might have issued in the last capture range
+ to finish, so that the sync prim update which happens after all the
+ PDumpTransition callbacks have been called doesn't clobber syncs
+ which the FW is currently working on.
+ Although this is suboptimal (while out of capture range we serialise
+ the PDump script processing and the FW for every persistent operation),
+ there is no easy solution.
+ Not all modules that work on syncs register a PDumpTransition and
+ thus we have no way of knowing whether we can skip the drain and the
+ sync prim dump or not.
+ */
+
+ eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+ If new command(s) have been written out of capture range then we
+ need to fast forward past uncaptured operations.
+ */
+ if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
+ {
+ /*
+ There are commands that were not captured, so after the
+ simulation drain (above) we also need to fast-forward past
+ those commands so the FW can start with the first command
+ which is in the new capture range
+ */
+ psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+ psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+ psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "cCCB(%s@%p): Fast-forward from %d to %d",
+ psClientCCB->szName,
+ psClientCCB,
+ psClientCCB->ui32LastPDumpWriteOffset,
+ psClientCCB->ui32HostWriteOffset);
+
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCCB_CTL),
+ ui32PDumpFlags);
+
+ /*
+ Although we've entered capture range we might not do any work
+ on this CCB, so update ui32LastPDumpWriteOffset to reflect
+ where we got to, so that next time we start the drain from
+ this point
+ */
+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */
+ psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
+ PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100;
+ psClientCCB->sUtilisation.ui32Warnings = 0;
+}
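+/* A worked example (values assumed): with a 64 KiB CCB and
+ * PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD defined as 90 (a build-time
+ * percentage), ui32ThresholdBytes is (65536 * 90) / 100 = 58982 bytes. */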
+
+static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32WarningType,
+ IMG_UINT32 ui32CmdSize)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+ if(ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
+ {
+ PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
+ }
+
+ PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
+ __FUNCTION__,
+ psClientCCB->szName,
+ psClientCCB->sUtilisation.ui32HighWaterMark,
+ psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
+ psClientCCB->ui32Size));
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32WarningType);
+ PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
+
+ PVR_LOG(("GPU %s command buffer usage high (%u). This is not an error but the application may not run optimally.",
+ aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size));
+#endif
+}
+
+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32WarningType,
+ IMG_UINT32 ui32CmdSize)
+{
+ /* In VERBOSE mode we print a message for each different
+ * event type as it happens, but by default we only issue
+ * one message.
+ */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+ if(!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
+#else
+ if(!psClientCCB->sUtilisation.ui32Warnings)
+#endif
+ {
+ _RGXPrintCCBUtilisationWarning(psClientCCB,
+ ui32WarningType,
+ ui32CmdSize);
+ /* record that we have issued a warning of this type */
+ psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
+ }
+}
+
+/* Check the current CCB utilisation. Print a one-time warning message if it is above the
+ * specified threshold
+ */
+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ /* Print a warning message if the cCCB watermark is above the threshold value */
+ if(psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
+ {
+ _RGXCCBUtilisationEvent(psClientCCB,
+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
+ 0);
+ }
+}
+
+/* Update the cCCB high watermark level if necessary */
+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
+ ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
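+ /* A minimal worked example, assuming the usual ring-buffer space calculation
+ * that reserves one byte (free = (read - write - 1) mod size): with a
+ * 0x1000-byte CCB, read offset 0x100 and write offset 0x300, free space is
+ * 0xDFF bytes and current usage is 0x1000 - 0xDFF = 0x201 bytes. */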
+
+ if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
+ {
+ psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
+
+ /* The high water mark has increased. Check if it is above the
+ * threshold so we can print a warning if necessary.
+ */
+ _RGXCheckCCBUtilisation(psClientCCB);
+ }
+}
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CCBSizeLog2,
+ CONNECTION_DATA *psConnectionData,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ RGX_CLIENT_CCB **ppsClientCCB,
+ DEVMEM_MEMDESC **ppsClientCCBMemDesc,
+ DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+ IMG_UINT32 ui32AllocSize = (1U << ui32CCBSizeLog2);
+ RGX_CLIENT_CCB *psClientCCB;
+
+ /* All client CCBs should be at least the "minimum" size declared by the API */
+ PVR_ASSERT (ui32CCBSizeLog2 >= MIN_SAFE_CCB_SIZE_LOG2);
+
+ psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+ if (psClientCCB == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+ psClientCCB->psServerCommonContext = psServerCommonContext;
+
+ uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
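+ /* Note: the CCB itself is mapped CPU write-combined (command data is written
+ * once and streamed to the firmware), while the small control structure is
+ * uncached, presumably so that the offsets shared with the firmware are
+ * observed promptly on both sides. */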
+
+ PDUMPCOMMENT("Allocate RGXFW cCCB");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32AllocSize,
+ uiClientCCBMemAllocFlags,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+ &psClientCCB->psClientCCBMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_alloc_ccb;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+ (void **) &psClientCCB->pui8ClientCCB);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_map_ccb;
+ }
+
+ PDUMPCOMMENT("Allocate RGXFW cCCB control");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_CCCB_CTL),
+ uiClientCCBCtlMemAllocFlags,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING],
+ &psClientCCB->psClientCCBCtrlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_alloc_ccbctrl;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+ (void **) &psClientCCB->psClientCCBCtrl);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_map_ccbctrl;
+ }
+
+ psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
+ OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ (unsigned long) OSGetCurrentClientProcessIDKM(),
+ (unsigned long) OSGetCurrentClientThreadIDKM(),
+ OSGetCurrentClientProcessNameKM());
+
+ PDUMPCOMMENT("cCCB control");
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCCB_CTL),
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psClientCCB->ui32HostWriteOffset = 0;
+ psClientCCB->ui32LastPDumpWriteOffset = 0;
+ psClientCCB->ui32Size = ui32AllocSize;
+ psClientCCB->ui32LastROff = ui32AllocSize - 1;
+ psClientCCB->ui32ByteCount = 0;
+ psClientCCB->ui32LastByteCount = 0;
+
+#if defined(DEBUG)
+ psClientCCB->ui32UpdateEntries = 0;
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXInitCCBUtilisation(psClientCCB);
+ psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor;
+#endif
+ eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+ _RGXCCBPDumpTransition,
+ psClientCCB,
+ &psClientCCB->hTransition);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_pdumpreg;
+ }
+
+ /*
+ * Note:
+ * Save the PDump specific structure, which is ref counted unlike
+ * the connection data, to ensure it's not freed too early
+ */
+ psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+ PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
+ psClientCCB->szName,
+ psClientCCB);
+
+ *ppsClientCCB = psClientCCB;
+ *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+ *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+ return PVRSRV_OK;
+
+fail_pdumpreg:
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+fail_alloc_ccb:
+ OSFreeMem(psClientCCB);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB)
+{
+ PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+ OSFreeMem(psClientCCB);
+}
+
+
+/******************************************************************************
+ FUNCTION : RGXAcquireCCB
+
+ PURPOSE : Obtains access to write some commands to a CCB
+
+ PARAMETERS : psClientCCB - The client CCB
+ ui32CmdSize - How much space is required
+ ppvBufferSpace - Pointer to space in the buffer
+ ui32PDumpFlags - Should this be PDump continuous?
+
+ RETURNS : PVRSRV_ERROR
+******************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ void **ppvBufferSpace,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bInCaptureRange;
+ IMG_BOOL bPdumpEnabled;
+
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+ bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+ /*
+ PDumpSetFrame will detect as we Transition into capture range for
+ frame based data but if we are PDumping continuous data then we
+ need to inform the PDump layer ourselves
+ */
+ if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
+ {
+ eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Check that the CCB can hold this command + padding */
+ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
+ ui32CmdSize, psClientCCB->ui32Size));
+ return PVRSRV_ERROR_CMD_TOO_BIG;
+ }
+
+ /*
+ Check we don't overflow the end of the buffer and make sure we have
+ enough space for the padding command. If we don't have enough space
+ (including the minimum amount for the padding command) we need to
+ insert a padding command now and wrap before adding the main command.
+ */
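+ /* Illustrative example (values assumed): with a 4 KiB CCB, a host write
+ * offset of 0xF00 and a 0x200-byte command, the command cannot fit before the
+ * end of the buffer, so the remaining 0x100 bytes are consumed by a padding
+ * command and the real command is written at offset 0 after the wrap. */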
+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+ {
+ /*
+ The command can fit without wrapping...
+ */
+ IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+#endif
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
+
+ /* Don't allow all the space to be used */
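+ /* (Requiring strictly more free space than the command size keeps at
+ * least one byte unused, so read == write always means "empty" rather
+ * than "full".) */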
+ if (ui32FreeSpace > ui32CmdSize)
+ {
+ *ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+ psClientCCB->ui32HostWriteOffset);
+ return PVRSRV_OK;
+ }
+
+ goto e_retry;
+ }
+ else
+ {
+ /*
+ We're at the end of the buffer without enough contiguous space.
+ The command cannot fit without wrapping, so we need to insert a
+ padding command and wrap. We need to do this in one go, otherwise
+ we would be leaving unflushed commands and forcing the client to
+ deal with flushing the padding command but not the command they
+ wanted to write. Therefore we either do all or nothing.
+ */
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT32 ui32FreeSpace;
+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32Remain, psClientCCB->ui32HostWriteOffset,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ psClientCCB->ui32Size);
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ 0 /*ui32HostWriteOffset after wrap */,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+#endif
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
+
+ /* Don't allow all the space to be used */
+ if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+ {
+ psHeader = (void *) (psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+ if (bPdumpEnabled)
+ {
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+ psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ ui32PDumpFlags);
+ }
+
+ *ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+ 0 /*ui32HostWriteOffset after wrap */);
+ return PVRSRV_OK;
+ }
+
+ goto e_retry;
+ }
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXCCBUtilisationEvent(psClientCCB,
+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
+ ui32CmdSize);
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+ return PVRSRV_ERROR_RETRY;
+}
+
+/******************************************************************************
+ FUNCTION : RGXReleaseCCB
+
+ PURPOSE : Release a CCB that we have been writing to.
+
+ PARAMETERS : psDevData - device data
+ psCCB - the CCB
+
+ RETURNS : None
+******************************************************************************/
+IMG_INTERNAL void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_BOOL bInCaptureRange;
+ IMG_BOOL bPdumpEnabled;
+
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+ bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+ /*
+ * If a padding command was needed then we should now move ui32HostWriteOffset
+ * forward. The command has already been dumped (if bPdumpEnabled).
+ */
+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
+ {
+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ psClientCCB->ui32Size);
+ psClientCCB->ui32ByteCount += ui32Remain;
+ }
+
+ /* Dump the CCB data */
+ if (bPdumpEnabled)
+ {
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+ psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ ui32PDumpFlags);
+ }
+
+ /*
+ * Check if there are any fences being written that will already be
+ * satisfied by the last written update command in this CCB. At the
+ * same time, ASSERT that all sync addresses are non-NULL.
+ */
+#if defined(DEBUG)
+ {
+ IMG_UINT8 *pui8BufferStart = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+ IMG_UINT8 *pui8BufferEnd = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+ IMG_BOOL bMessagePrinted = IMG_FALSE;
+
+ /* Walk through the commands in this section of CCB being released... */
+ while (pui8BufferStart < pui8BufferEnd)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;
+
+ if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+ {
+ /* If an UPDATE then record the values in case an adjacent fence uses them. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ psClientCCB->ui32UpdateEntries = 0;
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+ if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
+ {
+ psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
+ }
+ }
+ }
+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+ {
+ /* If a FENCE then check the values against the last UPDATE issued. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+
+ if (bMessagePrinted == IMG_FALSE)
+ {
+ RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList;
+ IMG_UINT32 ui32UpdateIndex;
+
+ for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++)
+ {
+ if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr &&
+ psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
+ psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+ bMessagePrinted = IMG_TRUE;
+ break;
+ }
+
+ psUpdatePtr++;
+ }
+ }
+
+ psUFOPtr++;
+ }
+ }
+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR ||
+ psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+ {
+ /* For all other UFO ops check the UFO address is not NULL. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+ psUFOPtr++;
+ }
+ }
+
+ /* Move to the next command in this section of CCB being released... */
+ pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
+ }
+ }
+#endif /* DEBUG */
+
+ /*
+ * Update the CCB write offset.
+ */
+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+ psClientCCB->ui32ByteCount += ui32CmdSize;
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXUpdateCCBUtilisation(psClientCCB);
+#endif
+ /*
+ PDumpSetFrame will detect as we Transition out of capture range for
+ frame based data but if we are PDumping continuous data then we
+ need to inform the PDump layer ourselves
+ */
+ if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Only Transitioning into capture range can cause an error */
+ eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_CONTINUOUS);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ if (bPdumpEnabled)
+ {
+ /* Update the PDump write offset to show we PDumped this command */
+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+ }
+
+#if defined(NO_HARDWARE)
+ /*
+ The firmware is not running so it cannot update these; we do it here instead.
+ */
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+ psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#endif
+}
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+ return psClientCCB->ui32HostWriteOffset;
+}
+
+#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR
+#define CHECK_COMMAND(cmd, fenceupdate) \
+ case RGXFWIF_CCB_CMD_TYPE_##cmd: \
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \
+ bFenceUpdate = fenceupdate; \
+ break
+
+static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32ByteCount)
+{
+#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS)
+ IMG_UINT8 *pui8Ptr = psClientCCB->pui8ClientCCB + ui32Offset;
+ IMG_UINT32 ui32ConsumeSize = ui32ByteCount;
+
+ while (ui32ConsumeSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8Ptr;
+ IMG_BOOL bFenceUpdate = IMG_FALSE;
+
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08x", pui8Ptr - psClientCCB->pui8ClientCCB));
+ switch(psHeader->eCmdType)
+ {
+ CHECK_COMMAND(TA, IMG_FALSE);
+ CHECK_COMMAND(3D, IMG_FALSE);
+ CHECK_COMMAND(CDM, IMG_FALSE);
+ CHECK_COMMAND(TQ_3D, IMG_FALSE);
+ CHECK_COMMAND(TQ_2D, IMG_FALSE);
+ CHECK_COMMAND(3D_PR, IMG_FALSE);
+ CHECK_COMMAND(NULL, IMG_FALSE);
+ CHECK_COMMAND(SHG, IMG_FALSE);
+ CHECK_COMMAND(RTU, IMG_FALSE);
+ CHECK_COMMAND(RTU_FC, IMG_FALSE);
+ CHECK_COMMAND(PRE_TIMESTAMP, IMG_FALSE);
+ CHECK_COMMAND(POST_TIMESTAMP, IMG_FALSE);
+ CHECK_COMMAND(FENCE, IMG_TRUE);
+ CHECK_COMMAND(UPDATE, IMG_TRUE);
+ CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE);
+ CHECK_COMMAND(RMW_UPDATE, IMG_TRUE);
+ CHECK_COMMAND(FENCE_PR, IMG_TRUE);
+ CHECK_COMMAND(UNFENCED_RMW_UPDATE, IMG_FALSE);
+ CHECK_COMMAND(PADDING, IMG_FALSE);
+ default:
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!"));
+ break;
+ }
+ pui8Ptr += sizeof(*psHeader);
+ if (bFenceUpdate)
+ {
+ IMG_UINT32 j;
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+ for (j=0;j<psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);j++)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x",
+ psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value));
+ }
+ }
+ else
+ {
+ IMG_UINT32 *pui32Ptr = (IMG_UINT32 *) pui8Ptr;
+ IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32);
+ while(ui32Remain)
+ {
+ if (ui32Remain >= 4)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3]));
+ pui32Ptr += 4;
+ ui32Remain -= 4;
+ }
+ if (ui32Remain == 3)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1], pui32Ptr[2]));
+ pui32Ptr += 3;
+ ui32Remain -= 3;
+ }
+ if (ui32Remain == 2)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1]));
+ pui32Ptr += 2;
+ ui32Remain -= 2;
+ }
+ if (ui32Remain == 1)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x",
+ pui32Ptr[0]));
+ pui32Ptr += 1;
+ ui32Remain -= 1;
+ }
+ }
+ }
+ pui8Ptr += psHeader->ui32CmdSize;
+ ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psClientCCB);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32ByteCount);
+#endif
+}
+
+/*
+ Work out how much space this command will require
+*/
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32ClientFenceCount,
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ IMG_UINT32 ui32ServerSyncFlagMask,
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE eType,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT32 ui32PDumpFlags,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+ IMG_CHAR *pszCommandName,
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+ IMG_UINT32 ui32FenceCount;
+ IMG_UINT32 ui32UpdateCount;
+ IMG_UINT32 i;
+
+ /* Job reference values */
+ psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
+ psCmdHelperData->ui32IntJobRef = ui32IntJobRef;
+
+ /* Save the data we require in the submit call */
+ psCmdHelperData->psClientCCB = psClientCCB;
+ psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
+ psCmdHelperData->pszCommandName = pszCommandName;
+
+ /* Client sync data */
+ psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+ psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+ psCmdHelperData->paui32FenceValue = paui32FenceValue;
+ psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+ psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+ psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+ /* Server sync data */
+ psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
+ psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
+ psCmdHelperData->ui32ServerSyncFlagMask = ui32ServerSyncFlagMask;
+ psCmdHelperData->papsServerSyncs = papsServerSyncs;
+
+ /* Command data */
+ psCmdHelperData->ui32CmdSize = ui32CmdSize;
+ psCmdHelperData->pui8DMCmd = pui8DMCmd;
+ psCmdHelperData->eType = eType;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "%s Command Server Init on FWCtx %08x", pszCommandName,
+ FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+ /* Init the generated data members */
+ psCmdHelperData->ui32ServerFenceCount = 0;
+ psCmdHelperData->ui32ServerUpdateCount = 0;
+ psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
+ psCmdHelperData->ui32PreTimeStampCmdSize = 0;
+ psCmdHelperData->ui32PostTimeStampCmdSize = 0;
+ psCmdHelperData->ui32RMWUFOCmdSize = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Workload Data added */
+ psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+#endif
+
+ if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
+ {
+
+ psCmdHelperData->pPreTimestampAddr = *ppPreAddr;
+ psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ }
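+ /* The expression (x + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)
+ * used above rounds x up to the next multiple of RGXFWIF_FWALLOC_ALIGN, which
+ * is assumed to be a power of two. */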
+
+ if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
+ {
+ psCmdHelperData->pPostTimestampAddr = *ppPostAddr;
+ psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ }
+
+ if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
+ {
+ psCmdHelperData->pRMWUFOAddr = * ppRMWUFOAddr;
+ psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
+ }
+
+
+ /* Workout how many fences and updates this command will have */
+ for (i = 0; i < ui32ServerSyncCount; i++)
+ {
+ IMG_UINT32 ui32Flag = paui32ServerSyncFlags[i] & ui32ServerSyncFlagMask;
+
+ if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ /* Server syncs must fence */
+ psCmdHelperData->ui32ServerFenceCount++;
+ }
+
+ /* If it is an update */
+ if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+ {
+ /* Is it a fenced update or a progress update (a.k.a. unfenced update)? */
+ if ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+ {
+ /* it is a progress update */
+ psCmdHelperData->ui32ServerUnfencedUpdateCount++;
+ }
+ else
+ {
+ /* it is a fenced update */
+ psCmdHelperData->ui32ServerUpdateCount++;
+ }
+ }
+ }
+
+
+ /* Total fence command size (header plus command data) */
+ ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
+ if (ui32FenceCount)
+ {
+ psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32FenceCmdSize = 0;
+ }
+
+ /* Total DM command size (header plus command data) */
+ psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ /* Total update command size (header plus command data) */
+ ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
+ if (ui32UpdateCount)
+ {
+ psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32UpdateCmdSize = 0;
+ }
+
+ /* Total unfenced update command size (header plus command data) */
+ if (psCmdHelperData->ui32ServerUnfencedUpdateCount != 0)
+ {
+ psCmdHelperData->ui32UnfencedUpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((psCmdHelperData->ui32ServerUnfencedUpdateCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;
+ }
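+ /* The per-command layout written into the CCB by RGXCmdHelperAcquireCmdCCB is:
+ * [FENCE][PRE_TIMESTAMP][DM][POST_TIMESTAMP][RMW_UPDATE][UPDATE][UNFENCED_UPDATE]
+ * with each section present only when its size computed above is non-zero. */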
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+ IMG_UINT8 *pui8StartPtr;
+ PVRSRV_ERROR eError;
+
+ /*
+ Work out how much space we need for all the command(s)
+ */
+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+ __FUNCTION__,
+ PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+ PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+ i));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ /*
+ Acquire space in the CCB for all the command(s).
+ */
+ eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+ ui32AllocSize,
+ (void **)&pui8StartPtr,
+ asCmdHelperData[0].ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /*
+ For each command fill in the fence, DM, and update commands.
+
+ Note:
+ We only fill in the client fences here; the server fences (and updates)
+ will be filled in together at the end, in RGXCmdHelperReleaseCmdCCB. This
+ is because we might fail the kernel CCB alloc and would then have to roll
+ back the server syncs if we had queued those operations here
+ */
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i];
+ IMG_UINT8 *pui8CmdPtr;
+ IMG_UINT8 *pui8ServerFenceStart = 0;
+ IMG_UINT8 *pui8ServerUpdateStart = 0;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+ IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+ {
+ PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+
+
+
+ /*
+ Create the fence command.
+ */
+ if (psCmdHelperData->ui32FenceCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT k;
+
+ /* Fences are at the start of the command */
+ pui8CmdPtr = pui8StartPtr;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* Fill in the client fences */
+ for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+ psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+ psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[k];
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+ PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+ PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+
+ }
+ pui8ServerFenceStart = pui8CmdPtr;
+ }
+
+ /* jump over the Server fences */
+ pui8CmdPtr = pui8StartPtr + psCmdHelperData->ui32FenceCmdSize;
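+ /* (The server fence slots skipped here are filled in later by
+ * RGXCmdHelperReleaseCmdCCB, together with the server updates.) */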
+
+
+ /*
+ Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to
+ sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have
+ the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+ */
+ if (psCmdHelperData->ui32PreTimeStampCmdSize != 0)
+ {
+ RGXWriteTimestampCommand(& pui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP,
+ psCmdHelperData->pPreTimestampAddr);
+ }
+
+ /*
+ Create the DM command
+ */
+ if (psCmdHelperData->ui32DMCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = psCmdHelperData->eType;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+
+ if(psCmdHelperData->psWorkEstKickData != NULL)
+ {
+ PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TA ||
+ psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D);
+ psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+ }
+ else
+ {
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+ }
+#endif
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* The buffer is write-combine, so no special device memory treatment required. */
+ OSCachedMemCopy(pui8CmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+ pui8CmdPtr += psCmdHelperData->ui32CmdSize;
+ }
+
+ if (psCmdHelperData->ui32PostTimeStampCmdSize != 0)
+ {
+ RGXWriteTimestampCommand(& pui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP,
+ psCmdHelperData->pPostTimestampAddr);
+ }
+
+
+ if (psCmdHelperData->ui32RMWUFOCmdSize != 0)
+ {
+ RGXFWIF_CCB_CMD_HEADER * psHeader;
+ RGXFWIF_UFO * psUFO;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+
+ psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ psUFO = (RGXFWIF_UFO *) pui8CmdPtr;
+ psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr;
+
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+ }
+
+
+ /*
+ Create the update command.
+
+ Note:
+ We only fill in the client updates here, the server updates (and fences)
+ will be filled in together at the end
+ */
+ if (psCmdHelperData->ui32UpdateCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT k;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* Fill in the client updates */
+ for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+ psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+ psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[k];
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+ PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+ PDUMPCOMMENT(".. %s client sync update - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+ }
+ pui8ServerUpdateStart = pui8CmdPtr;
+ }
+
+ /* Save the server sync fence & update offsets for submit time */
+ psCmdHelperData->pui8ServerFenceStart = pui8ServerFenceStart;
+ psCmdHelperData->pui8ServerUpdateStart = pui8ServerUpdateStart;
+
+ /* Jump over the fenced update command to reach the unfenced update command */
+ if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0)
+ {
+ RGXFWIF_CCB_CMD_HEADER * const psHeader = (RGXFWIF_CCB_CMD_HEADER * ) psCmdHelperData->pui8ServerUpdateStart + psCmdHelperData->ui32UpdateCmdSize;
+ /* Set up the header for unfenced updates. */
+ PVR_ASSERT(psHeader); /* Could be NULL if ui32UpdateCmdSize is 0, which is never expected */
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+ /* jump over the header */
+ psCmdHelperData->pui8ServerUnfencedUpdateStart = ((IMG_UINT8*) psHeader) + sizeof(RGXFWIF_CCB_CMD_HEADER);
+ }
+ else
+ {
+ psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL;
+ }
+
+ /* Save start for sanity checking at submit time */
+ psCmdHelperData->pui8StartPtr = pui8StartPtr;
+
+ /* Set the start pointer for the next iteration around the loop */
+ pui8StartPtr +=
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize +
+ psCmdHelperData->ui32UnfencedUpdateCmdSize;
+
+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+ {
+ PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+ else
+ {
+ PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ Fill in the server syncs data and release the CCB space
+*/
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ const IMG_CHAR *pcszDMName,
+ IMG_UINT32 ui32CtxAddr)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+#if defined(LINUX)
+ IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
+ IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
+#endif
+
+ /*
+ Work out how much space we need for all the command(s)
+ */
+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+ /*
+ For each command fill in the server sync info
+ */
+ for (i=0;i<ui32CmdCount;i++)
+ {
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+ IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
+ IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
+ IMG_UINT8 *pui8ServerUnfencedUpdateStart = psCmdHelperData->pui8ServerUnfencedUpdateStart;
+ IMG_UINT32 j;
+
+ /* Now fill in the server fence and updates together */
+ for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
+ {
+ RGXFWIF_UFO *psUFOPtr;
+ IMG_UINT32 ui32UpdateValue;
+ IMG_UINT32 ui32FenceValue;
+ IMG_UINT32 ui32SyncAddr;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Flag = psCmdHelperData->paui32ServerSyncFlags[j] & psCmdHelperData->ui32ServerSyncFlagMask;
+ IMG_BOOL bFence = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
+ IMG_BOOL bUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;
+ const IMG_BOOL bUnfencedUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+ ? IMG_TRUE
+ : IMG_FALSE;
+
+ eError = PVRSRVServerSyncQueueHWOpKM(psCmdHelperData->papsServerSyncs[j],
+ bUpdate,
+ &ui32FenceValue,
+ &ui32UpdateValue);
+ /* This function can't fail */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+ As server syncs always fence (we have a check in RGXCmdHelperInitCmdCCB
+ which ensures the client is playing ball) the filling in of the fence
+ is unconditional.
+ */
+ eError = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j], &ui32SyncAddr);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to read Server Sync FW address (%d)",
+ __FUNCTION__, eError));
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ if (bFence)
+ {
+ PVR_ASSERT(pui8ServerFenceStart != 0);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32FenceValue;
+ pui8ServerFenceStart += sizeof(RGXFWIF_UFO);
+
+#if defined(LINUX)
+ if (bTraceChecks)
+ {
+ trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ 1,
+ &psUFOPtr->puiAddrUFO,
+ &psUFOPtr->ui32Value);
+ }
+#endif
+ }
+
+ /* If there is an update then fill that in as well */
+ if (bUpdate)
+ {
+ if (bUnfencedUpdate)
+ {
+ PVR_ASSERT(pui8ServerUnfencedUpdateStart != 0);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerUnfencedUpdateStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32UpdateValue;
+ pui8ServerUnfencedUpdateStart += sizeof(RGXFWIF_UFO);
+ }
+ else
+ {
+ /* fenced update */
+ PVR_ASSERT(pui8ServerUpdateStart != 0);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32UpdateValue;
+ pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);
+ }
+#if defined(LINUX)
+ if (bTraceUpdates)
+ {
+ trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ 1,
+ &psUFOPtr->puiAddrUFO,
+ &psUFOPtr->ui32Value);
+ }
+#endif
+
+#if defined(NO_HARDWARE)
+ /*
+ There is no FW so the host has to do any Sync updates
+ (client sync updates are done in the client
+ */
+ PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
+#endif
+ }
+ }
+
+#if defined(LINUX)
+ if (bTraceChecks)
+ {
+ trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ psCmdHelperData->ui32ClientFenceCount,
+ psCmdHelperData->pauiFenceUFOAddress,
+ psCmdHelperData->paui32FenceValue);
+ }
+ if (bTraceUpdates)
+ {
+ trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ psCmdHelperData->ui32ClientUpdateCount,
+ psCmdHelperData->pauiUpdateUFOAddress,
+ psCmdHelperData->paui32UpdateValue);
+ }
+#endif
+
+ if (psCmdHelperData->ui32ServerSyncCount)
+ {
+ /*
+ Do some sanity checks to ensure we did the pointer maths right
+ */
+ if (pui8ServerFenceStart != 0)
+ {
+ PVR_ASSERT(pui8ServerFenceStart ==
+ (psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize));
+ }
+
+ if (pui8ServerUpdateStart != 0)
+ {
+ PVR_ASSERT(pui8ServerUpdateStart ==
+ psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize);
+ }
+
+ if (pui8ServerUnfencedUpdateStart != 0)
+ {
+ PVR_ASSERT(pui8ServerUnfencedUpdateStart ==
+ psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize +
+ psCmdHelperData->ui32UnfencedUpdateCmdSize);
+ }
+ }
+
+ /*
+ All the commands have been filled in so release the CCB space.
+ The FW still won't run this command until we kick it
+ */
+ PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags,
+ "%s Command Server Release on FWCtx %08x",
+ psCmdHelperData->pszCommandName, ui32CtxAddr);
+ }
+
+ _RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
+ asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
+ ui32AllocSize);
+
+ RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
+ ui32AllocSize,
+ asCmdHelperData[0].ui32PDumpFlags);
+}
+
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+
+ /*
+ Work out how much space we need for all the command(s)
+ */
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ ui32AllocSize +=
+ asCmdHelperData[i].ui32FenceCmdSize +
+ asCmdHelperData[i].ui32DMCmdSize +
+ asCmdHelperData[i].ui32UpdateCmdSize +
+ asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+ asCmdHelperData[i].ui32PreTimeStampCmdSize +
+ asCmdHelperData[i].ui32PostTimeStampCmdSize +
+ asCmdHelperData[i].ui32RMWUFOCmdSize;
+ }
+
+ return ui32AllocSize;
+}
+
+/* Work out how much of an offset there is to a specific command. */
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ IMG_UINT32 ui32Cmdindex)
+{
+ IMG_UINT32 ui32Offset = 0;
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32Cmdindex; i++)
+ {
+ ui32Offset +=
+ asCmdHelperData[i].ui32FenceCmdSize +
+ asCmdHelperData[i].ui32DMCmdSize +
+ asCmdHelperData[i].ui32UpdateCmdSize +
+ asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+ asCmdHelperData[i].ui32PreTimeStampCmdSize +
+ asCmdHelperData[i].ui32PostTimeStampCmdSize +
+ asCmdHelperData[i].ui32RMWUFOCmdSize;
+ }
+
+ return ui32Offset;
+}
+
+/* Returns the offset of the data master command from a write offset */
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+ return psCmdHelperData->ui32FenceCmdSize + psCmdHelperData->ui32PreTimeStampCmdSize;
+}
+
+
+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+ switch (cmdType)
+ {
+ case RGXFWIF_CCB_CMD_TYPE_TA: return "TA";
+ case RGXFWIF_CCB_CMD_TYPE_3D: return "3D";
+ case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D";
+ case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR";
+ case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL";
+ case RGXFWIF_CCB_CMD_TYPE_SHG: return "SHG";
+ case RGXFWIF_CCB_CMD_TYPE_RTU: return "RTU";
+ case RGXFWIF_CCB_CMD_TYPE_RTU_FC: return "RTU_FC";
+ case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM";
+
+ case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE";
+ case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR";
+ case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY";
+
+ case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP";
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE";
+
+ case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING";
+
+ default:
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return "INVALID";
+}
+
+PVRSRV_ERROR CheckForStalledCCB(RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl;
+ IMG_UINT32 ui32SampledRdOff, ui32SampledWrOff;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psCurrentClientCCB == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+ ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+ if (ui32SampledRdOff > psClientCCBCtrl->ui32WrapMask ||
+ ui32SampledWrOff > psClientCCBCtrl->ui32WrapMask)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d WOFF=%d)",
+ ui32SampledRdOff, ui32SampledWrOff));
+ return PVRSRV_ERROR_INVALID_OFFSET;
+ }
+
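+ /* Heuristic: the CCB is considered stalled when there is outstanding work
+ * (read != write), there was already outstanding work at the previous check,
+ * the firmware read offset has not moved since that check, and less than a
+ * whole buffer's worth of new data has been queued in the meantime. */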
+ if (ui32SampledRdOff != ui32SampledWrOff &&
+ psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+ ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+ (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+ {
+ //RGXFWIF_DEV_VIRTADDR v = {0};
+ //DumpStalledCCBCommand(v,psCurrentClientCCB,NULL);
+
+ /* Don't log this by default unless debugging since a higher up
+ * function will log the stalled condition. Helps avoid double
+ * messages in the log.
+ */
+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has not progressed (ROFF=%d WOFF=%d) for DM: %s",
+ ui32SampledRdOff, ui32SampledWrOff, RGXStringifyKickTypeDM(eKickTypeDM)));
+ eError = PVRSRV_ERROR_CCCB_STALLED;
+ }
+
+ psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+ psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+ psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+ return eError;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+#endif
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+ IMG_UINT32 ui32Offset = psClientCCBCtrl->ui32ReadOffset;
+ IMG_UINT32 ui32DepOffset = psClientCCBCtrl->ui32DepOffset;
+ IMG_UINT32 ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset;
+ IMG_UINT32 ui32WrapMask = psClientCCBCtrl->ui32WrapMask;
+ IMG_CHAR *pszState = "Ready";
+
+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr,
+ (IMG_PCHAR)&psCurrentClientCCB->szName);
+ if (ui32Offset == ui32EndOffset)
+ {
+ PVR_DUMPDEBUG_LOG(" `--<Empty>");
+ }
+
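+ /* Walk the CCB from the firmware read offset up to the host write offset,
+ * printing one line per command header; commands at or after the dependency
+ * offset are reported as "Waiting" rather than "Ready".
+ */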
+ while (ui32Offset != ui32EndOffset)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER*)(pui8ClientCCBBuff + ui32Offset);
+ IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask;
+ IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE;
+ IMG_BOOL bLastUFO;
+ #define CCB_SYNC_INFO_LEN 80
+ IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN];
+ IMG_UINT32 ui32NoOfUpdates, i;
+ RGXFWIF_UFO *psUFOPtr;
+
+ ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ psUFOPtr = (RGXFWIF_UFO*)(pui8ClientCCBBuff + ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER));
+ pszSyncInfo[0] = '\0';
+
+ if (ui32Offset == ui32DepOffset)
+ {
+ pszState = "Waiting";
+ }
+
+ PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u",
+ bLastCommand? "`": "|",
+ pszState, _CCBCmdTypename(psCmdHeader->eCmdType),
+ ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef
+ );
+
+ /* Switch on the command type and print any fence checks / sync updates it carries */
+ switch (psCmdHeader->eCmdType)
+ {
+ case RGXFWIF_CCB_CMD_TYPE_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_FENCE:
+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR:
+ {
+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+ {
+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+#endif
+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s",
+ bLastCommand? " ": "|",
+ bLastUFO? "`": "|",
+ psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value,
+ pszSyncInfo
+ );
+ }
+ break;
+ }
+
+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE:
+ {
+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+ {
+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+#endif
+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val++ %s",
+ bLastCommand? " ": "|",
+ bLastUFO? "`": "|",
+ psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo
+ );
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ ui32Offset = ui32NextOffset;
+ }
+
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP) */
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+ volatile IMG_UINT8 *pui8Ptr;
+ IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+ IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+ IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+ pui8Ptr = pui8ClientCCBBuff + ui32SampledRdOff;
+
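+ /* The context is blocked on a fence when the firmware read offset and the
+ * dependency offset coincide while the host write offset is further ahead.
+ */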
+ if ((ui32SampledRdOff == ui32SampledDepOff) &&
+ (ui32SampledRdOff != ui32SampledWrOff))
+ {
+ volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (volatile RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+ RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType;
+
+ /* CCB is stalled on a fence... */
+ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+ IMG_UINT32 jj;
+
+ /* Display details of the fence object on which the context is pending */
+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:",
+ sFWCommonContext.ui32Addr,
+ ui32SampledRdOff,
+ (IMG_PCHAR)&psCurrentClientCCB->szName,
+ _CCBCmdTypename(eCommandType));
+ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+ {
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value,
+ RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr));
+#endif
+ }
+
+ /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+ pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
+ psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+ if ((uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType));
+ /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+ pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
+ psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+ /* If the next command is an update, display details of that so we can see what would then become unblocked */
+ if ((uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+ {
+ eCommandType = psCommandHeader->eCmdType;
+
+ if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+ {
+ psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
+ PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType));
+ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+ {
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value,
+ RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr));
+#endif
+ }
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Circular Command Buffer functionality.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Circular Command Buffer functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCCB_H__)
+#define __RGXCCB_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdebug.h"
+#include "rgxdefs_km.h"
+#include "pvr_notifier.h"
+
+#define MAX_CLIENT_CCB_NAME 30
+#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+ This structure is declared in this header (rather than in rgxccb.c) because
+ callers allocate it on the heap.
+*/
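+
+/* Typical usage (based on the helper functions declared below): the caller
+ fills the structure with RGXCmdHelperInitCmdCCB(), reserves client CCB space
+ with RGXCmdHelperAcquireCmdCCB(), and submits the command(s) with
+ RGXCmdHelperReleaseCmdCCB().
+*/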
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+ /* Data setup at command init time */
+ RGX_CLIENT_CCB *psClientCCB;
+ IMG_CHAR *pszCommandName;
+ IMG_UINT32 ui32PDumpFlags;
+
+ IMG_UINT32 ui32ClientFenceCount;
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress;
+ IMG_UINT32 *paui32UpdateValue;
+
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 *paui32ServerSyncFlags;
+ IMG_UINT32 ui32ServerSyncFlagMask;
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs;
+
+ RGXFWIF_CCB_CMD_TYPE eType;
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT8 *pui8DMCmd;
+ IMG_UINT32 ui32FenceCmdSize;
+ IMG_UINT32 ui32DMCmdSize;
+ IMG_UINT32 ui32UpdateCmdSize;
+ IMG_UINT32 ui32UnfencedUpdateCmdSize;
+
+ /* timestamp commands */
+ PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr;
+ IMG_UINT32 ui32PreTimeStampCmdSize;
+ PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr;
+ IMG_UINT32 ui32PostTimeStampCmdSize;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+ IMG_UINT32 ui32RMWUFOCmdSize;
+
+ /* Data setup at command acquire time */
+ IMG_UINT8 *pui8StartPtr;
+ IMG_UINT8 *pui8ServerUpdateStart;
+ IMG_UINT8 *pui8ServerUnfencedUpdateStart;
+ IMG_UINT8 *pui8ServerFenceStart;
+ IMG_UINT32 ui32ServerFenceCount;
+ IMG_UINT32 ui32ServerUpdateCount;
+ IMG_UINT32 ui32ServerUnfencedUpdateCount;
+
+ /* Job reference fields */
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+
+ /* Workload kick information */
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData;
+} RGX_CCB_CMD_HELPER_DATA;
+
+#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+
+#define RGX_CCB_REQUESTORS(TYPE) \
+ /* for debugging purposes */ TYPE(UNDEF) \
+ TYPE(TA) \
+ TYPE(3D) \
+ TYPE(CDM) \
+ TYPE(SH) \
+ TYPE(RS) \
+ TYPE(TQ_3D) \
+ TYPE(TQ_2D) \
+ TYPE(TQ_TDM) \
+ TYPE(KICKSYNC) \
+ /* Only used for validating the number of entries in this list */ TYPE(FIXED_COUNT) \
+ TYPE(FC0) \
+ TYPE(FC1) \
+ TYPE(FC2) \
+ TYPE(FC3) \
+
+/* Forms an enum constant for each type present in the RGX_CCB_REQUESTORS list. The enum is mainly used
+ as an index into the aszCCBRequestors table defined in rgxccb.c. The total number of enum constants
+ must satisfy the build assert that follows the enum.
+*/
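+/* For example, the CONSTRUCT_ENUM expansion below turns TYPE(TA) into
+ REQ_TYPE_TA, TYPE(CDM) into REQ_TYPE_CDM, and so on, preserving the order of
+ the X macro list.
+*/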
+typedef enum _RGX_CCB_REQUESTOR_TYPE_
+{
+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
+ RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
+#undef CONSTRUCT_ENUM
+
+ /* should always be at the end */
+ REQ_TYPE_TOTAL_COUNT,
+} RGX_CCB_REQUESTOR_TYPE;
+
+/* The number of enum constants above always matches the number of entries in the RGX_CCB_REQUESTORS X macro list.
+ If the value of DPX_MAX_RAY_CONTEXTS changes to some 'n', flow-control entries up to FC[n-1] must be added to
+ the RGX_CCB_REQUESTORS list accordingly.
+*/
+static_assert(REQ_TYPE_TOTAL_COUNT == REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1,
+ "Mismatch between DPX_MAX_RAY_CONTEXTS and RGX_CCB_REQUESTOR_TYPE enum");
+
+/* Tuple describing the columns of the following table */
+typedef enum _RGX_CCB_REQUESTOR_TUPLE_
+{
+ REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */
+ REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */
+ REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */
+
+ /* should always be at the end */
+ REQ_TUPLE_CARDINALITY,
+} RGX_CCB_REQUESTOR_TUPLE;
+
+/* Table containing an array of strings for each requestor type in the RGX_CCB_REQUESTORS list. In addition to its use in
+ rgxccb.c, this table is also used to look up the strings dumped in PDUMP comments; hence it is marked extern for
+ use in other modules.
+*/
+extern IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CCBSizeLog2,
+ CONNECTION_DATA *psConnectionData,
+ RGX_CCB_REQUESTOR_TYPE eCCBRequestor,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ RGX_CLIENT_CCB **ppsClientCCB,
+ DEVMEM_MEMDESC **ppsClientCCBMemDesc,
+ DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc);
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ void **ppvBufferSpace,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32ClientFenceCount,
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ IMG_UINT32 ui32ServerSyncFlagMask,
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE eType,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT32 ui32PDumpFlags,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+ IMG_CHAR *pszCommandName,
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ const IMG_CHAR *pcszDMName,
+ IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ IMG_UINT32 ui32Cmdindex);
+
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#endif
+
+PVRSRV_ERROR CheckForStalledCCB(RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM);
+#endif /* __RGXCCB_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Compute routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Compute routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ DEVMEM_MEMDESC *psFWComputeContextStateMemDesc;
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hJobId;
+};
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pbyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sServicesSignalAddr,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext;
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Prepare cleanup struct */
+ *ppsComputeContext = NULL;
+ psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+ if (psComputeContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psComputeContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psComputeContext->psSync,
+ "compute cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware compute context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_COMPUTECTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwComputeContextState",
+ &psComputeContext->psFWComputeContextStateMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_contextsuspendalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psComputeContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc,
+ pbyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+ sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+ if ((psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat == 2) &&
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+ {
+ sInfo.psResumeSignalAddr = &sServicesSignalAddr;
+ }
+ else
+ {
+ PVR_UNREFERENCED_PARAMETER(sServicesSignalAddr);
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_CDM,
+ RGXFWIF_DM_CDM,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ psComputeContext->psFWComputeContextStateMemDesc,
+ RGX_CDM_CCB_SIZE_LOG2,
+ ui32Priority,
+ &sInfo,
+ &psComputeContext->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+ SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+ *ppsComputeContext = psComputeContext;
+ return PVRSRV_OK;
+
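+ /* Error unwind: the labels below are ordered so that each failure path
+ * frees exactly the resources allocated before the point of failure.
+ */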
+fail_contextalloc:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+ SyncPrimFree(psComputeContext->psSync);
+fail_syncalloc:
+ OSFreeMem(psComputeContext);
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+ psComputeContext->psServerCommonContext,
+ psComputeContext->psSync,
+ RGXFWIF_DM_CDM,
+ PDUMP_FLAGS_NONE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_remove_node(&(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+ FWCommonContextFree(psComputeContext->psServerCommonContext);
+ DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+ DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+ SyncPrimFree(psComputeContext->psSync);
+ OSFreeMem(psComputeContext);
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 *pi32UpdateFenceFD,
+ IMG_CHAR pszUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef)
+{
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32CDMCmdOffset = 0;
+ IMG_UINT32 ui32JobId;
+ IMG_UINT32 ui32FWCtx;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress;
+ IMG_INT32 i32UpdateFenceFD = -1;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ /* Android fd sync update info */
+ struct pvr_sync_append_data *psFDData = NULL;
+ if (i32UpdateTimelineFD >= 0 && !pi32UpdateFenceFD)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else
+ if (i32UpdateTimelineFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing native sync timeline (%d) in non native sync enabled driver",
+ __func__, i32UpdateTimelineFD));
+ }
+ if (i32CheckFenceFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing native check sync (%d) in non native sync enabled driver",
+ __func__, i32CheckFenceFD));
+ }
+#endif
+ /* Ensure the string is null-terminated (Required for safety) */
+ pszUpdateFenceName[31] = '\0';
+
+ ui32JobId = OSAtomicIncrement(&psComputeContext->hJobId);
+
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClientFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClientUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on CDM) must fence", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ goto err_populate_sync_addr_list;
+ }
+ }
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+ eError =
+ pvr_sync_append_fences(pszUpdateFenceName,
+ i32CheckFenceFD,
+ i32UpdateTimelineFD,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOAddress,
+ paui32ClientUpdateValue,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOAddress,
+ paui32ClientFenceValue,
+ &psFDData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fdsync;
+ }
+ pvr_sync_get_updates(psFDData, &ui32ClientUpdateCount,
+ &pauiClientUpdateUFOAddress, &paui32ClientUpdateValue);
+
+ pvr_sync_get_checks(psFDData, &ui32ClientFenceCount,
+ &pauiClientFenceUFOAddress, &paui32ClientFenceValue);
+ }
+#endif /* SUPPORT_NATIVE_FENCE_SYNC */
+
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
+ ui32ClientFenceCount,
+ pauiClientFenceUFOAddress,
+ paui32ClientFenceValue,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOAddress,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_CDM,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "Compute",
+ asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdinit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData),
+ asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdacquire;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that, if there is no space in the kernel CCB, we can return
+ with retry back to the services client before committing any operations.
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet, so only submit
+ the command if the command helper setup was successful.
+ */
+
+ /*
+ All the required resources are ready at this point; we cannot fail, so
+ take the required server sync operations and commit all the resources.
+ */
+
+ ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+
+ /* Construct the kernel compute CCB command. */
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32CDMCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psComputeContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_CDM);
+
+ /*
+ * Submit the compute command to the firmware.
+ */
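+ /* RGXScheduleCommand() returns PVRSRV_ERROR_RETRY while the command cannot
+ * be queued (typically because the kernel CCB has no space), so keep
+ * retrying with a short wait until it succeeds or the timeout expires.
+ */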
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sCmpKCCBCmd,
+ sizeof(sCmpKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickCDMKM failed to schedule kernel CCB command. (0x%x)", eError));
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_CDM);
+#endif
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32UpdateTimelineFD >= 0)
+ {
+ /* If we get here, this should never fail. Hitting that likely implies
+ * a code error above */
+ i32UpdateFenceFD = pvr_sync_get_update_fd(psFDData);
+ if (i32UpdateFenceFD < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get install update sync fd",
+ __FUNCTION__));
+ /* If we fail here, we cannot rollback the syncs as the hw already
+ * has references to resources they may be protecting in the kick
+ * so fallthrough */
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_cmdacquire;
+ }
+ }
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_complete_fences(psFDData);
+#endif
+ pvr_sync_free_append_fences_data(psFDData);
+#endif
+
+ *pi32UpdateFenceFD = i32UpdateFenceFD;
+
+ return PVRSRV_OK;
+
+fail_cmdacquire:
+fail_cmdinit:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ pvr_sync_rollback_append_fences(psFDData);
+ pvr_sync_free_append_fences_data(psFDData);
+fail_fdsync:
+#endif
+err_populate_sync_addr_list:
+ return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ RGXFWIF_KCCB_CMD sFlushCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sFlushCmd,
+ sizeof(sFlushCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ psComputeContext->psSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
+ }
+ }
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+ if (psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat == 2)
+ {
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifyWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+ return eError;
+ }
+ else
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+ psConnection,
+ psComputeContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_CDM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ }
+ return eError;
+}
+
+/*
+ * PVRSRVRGXGetLastComputeContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ PVR_ASSERT(psComputeContext != NULL);
+ PVR_ASSERT(peLastResetReason != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ *peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->psServerCommonContext,
+ pui32LastResetJobRef);
+
+ return PVRSRV_OK;
+}
+
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+ DumpStalledFWCommonContext(psCurrentServerComputeCtx->psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32ContextBitMask = 0;
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+ if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+ == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+ }
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX compute functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX compute functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCOMPUTE_H__)
+#define __RGXCOMPUTE_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXCreateComputeContextKM
+
+ @Description
+ Server-side implementation of RGXCreateComputeContext
+
+ @Input psConnection - Connection the compute context is created on
+ @Input psDeviceNode - RGX device node
+ @Input ui32Priority - Priority of the compute context
+ @Input sMCUFenceAddr - MCU fence address
+ @Input ui32FrameworkRegisterSize - Size of the framework register data
+ @Input pbyFrameworkRegisters - Framework register data
+ @Input hMemCtxPrivData - Private data of the memory context
+ @Input sServicesSignalAddr - Services signal address
+ @Output ppsComputeContext - Created compute context
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pbyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sServicesSignalAddr,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyComputeContext
+
+ @Input psComputeContext - Compute context to destroy
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXKickCDMKM
+
+ @Description
+ Server-side implementation of RGXKickCDM
+
+ @Input psComputeContext - Compute context to kick the CDM command on
+ @Input pui8DMCmd - CDM command to insert into the client CCB
+ @Input ui32CmdSize - Size of the CDM command, in bytes
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_INT32 i32CheckFenceFd,
+ IMG_INT32 i32UpdateTimelineFd,
+ IMG_INT32 *pi32UpdateFenceFd,
+ IMG_CHAR pcszUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXFlushComputeDataKM
+
+ @Description
+ Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input psComputeContext - Compute context to notify of the write offset update
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+/* Debug - check if compute context is waiting on a fence */
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXCOMPUTE_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX debug information
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX debugging functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lists.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_sf.h"
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+#include "rgxfw_log_helper.h"
+#endif
+
+#include "rgxta3d.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxray.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+#include "rgx_bvnc_defs_km.h"
+#define PVR_DUMP_DRIVER_INFO(x, y) \
+ PVR_DUMPDEBUG_LOG("%s info: " \
+ "BuildOptions: 0x%08x " \
+ "BuildVersion: %d.%d " \
+ "BuildRevision: %8d " \
+ "BuildType: %s", \
+ (x), \
+ (y).ui32BuildOptions, \
+ PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \
+ PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \
+ (y).ui32BuildRevision, \
+ (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug" : "release")
+
+
+#define RGX_DEBUG_STR_SIZE (150)
+
+#define RGX_CR_BIF_CAT_BASE0 (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1 (0x1208U)
+
+#define RGX_CR_BIF_CAT_BASEN(n) \
+ (RGX_CR_BIF_CAT_BASE0 + \
+ ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * (n)))
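+/* For example, RGX_CR_BIF_CAT_BASEN(2) evaluates to 0x1210U: the catalogue
+ bases are spaced (RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) = 8 bytes
+ apart, starting at RGX_CR_BIF_CAT_BASE0.
+*/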
+
+
+#define RGXDBG_BIF_IDS \
+ X(BIF0)\
+ X(BIF1)\
+ X(TEXAS_BIF)\
+ X(DPX_BIF)
+
+#define RGXDBG_SIDEBAND_TYPES \
+ X(META)\
+ X(TLA)\
+ X(DMA)\
+ X(VDMM)\
+ X(CDM)\
+ X(IPP)\
+ X(PM)\
+ X(TILING)\
+ X(MCU)\
+ X(PDS)\
+ X(PBE)\
+ X(VDMS)\
+ X(IPF)\
+ X(ISP)\
+ X(TPF)\
+ X(USCS)\
+ X(PPP)\
+ X(VCE)\
+ X(TPF_CPF)\
+ X(IPF_CPF)\
+ X(FBCDC)
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+ RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+ RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
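+
+/* Each X macro list above is expanded twice: once (here) into an enum and,
+ * for RGXDBG_BIF_IDS, again below into the pszBIFNames string table, keeping
+ * enum values and their printable names in sync by construction.
+ */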
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME) #NAME,
+ RGXFWIF_POW_STATES
+#undef X
+};
+
+static const IMG_CHAR *const pszBIFNames[] =
+{
+#define X(NAME) #NAME,
+ RGXDBG_BIF_IDS
+#undef X
+};
+#endif
+
+#if !defined(NO_HARDWARE)
+/* Translation of MIPS exception encoding */
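+/* NULL entries are exception codes not decoded by this table (reserved or not expected here). */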
+static const IMG_CHAR * const apszMIPSExcCodes[32] =
+{
+ "Interrupt",
+ "TLB modified exception",
+ "TLB exception (load/instruction fetch)",
+ "TLB exception (store)",
+ "Address error exception (load/instruction fetch)",
+ "Address error exception (store)",
+ "Bus error exception (instruction fetch)",
+ "Bus error exception (load/store)",
+ "Syscall exception",
+ "Breakpoint exception",
+ "Reserved instruction exception",
+ "Coprocessor Unusable exception",
+ "Arithmetic Overflow exception",
+ "Trap exception",
+ NULL,
+ NULL,
+ "Implementation-Specific Exception 1 (COP2)",
+ "CorExtend Unusable",
+ "Coprocessor 2 exceptions",
+ "TLB Read-Inhibit",
+ "TLB Execute-Inhibit",
+ NULL,
+ NULL,
+ "Reference to WatchHi/WatchLo address",
+ "Machine check",
+ NULL,
+ "DSP Module State Disabled exception",
+ NULL,
+ NULL,
+ NULL,
+ /* Can only happen in MIPS debug mode */
+ "Parity error",
+ NULL
+};
+#endif
+
+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
+{
+ IMG_UINT32 ui32Mask;
+ const IMG_CHAR * pszExplanation;
+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
+
+#if !defined(NO_HARDWARE)
+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
+{
+ { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" },
+ { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" },
+ { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" },
+ { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" },
+ { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" },
+ { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" },
+ { RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" }
+};
+#endif
+
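+/* Read a 32-bit value from firmware memory via the META slave port. Note that
+ this takes the first device node in the global list, so a single RGX device
+ is assumed. */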
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32Value = 0;
+ PVRSRV_ERROR eError;
+
+ eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s", PVRSRVGetErrorStringKM(eError)));
+ }
+
+ return ui32Value;
+}
+
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+static PVRSRV_ERROR _ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVMEM_MEMDESC *psMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psFWAddr,
+ const IMG_CHAR *pszDesc)
+{
+ PMR *psFWImagePMR;
+ IMG_UINT32 *pui32HostCodeAddr;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32FWCodeAddr, ui32FWImageLen, ui32Value, i;
+ IMG_HANDLE hFWImage;
+
+ eError = DevmemServerGetImportHandle(psMemDesc,
+ (void **)&psFWImagePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Error getting %s PMR (%u)",
+ pszDesc,
+ eError));
+ return eError;
+ }
+
+ /* Get a pointer to the FW code and the allocation size */
+ eError = PMRAcquireKernelMappingData(psFWImagePMR,
+ 0,
+ 0, /* Map whole PMR */
+ (void**)&pui32HostCodeAddr,
+ &ui32FWImageLen,
+ &hFWImage);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Acquire mapping for %s failed (%u)",
+ pszDesc,
+ eError));
+ return eError;
+ }
+
+ ui32FWCodeAddr = psFWAddr->ui32Addr;
+ ui32FWImageLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+ for (i = 0; i < ui32FWImageLen; i++)
+ {
+ eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeAddr, &ui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP error: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ goto validatefwimage_release;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "0x%x: CPU 0x%08x, FW 0x%08x",
+ i * 4, pui32HostCodeAddr[i], ui32Value));
+
+ if (pui32HostCodeAddr[i] != ui32Value)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x, FW 0x%08x",
+ pszDesc,
+ i * 4, pui32HostCodeAddr[i], ui32Value));
+ eError = PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+ goto validatefwimage_release;
+ }
+
+ ui32FWCodeAddr += 4;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "ValidateFWImageWithSP: Match between Host and Meta views of the %s",
+ pszDesc));
+
+validatefwimage_release:
+ PMRReleaseKernelMappingData(psFWImagePMR, hFWImage);
+
+ return eError;
+}
+
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && defined(DEBUG) && !defined(PVRSRV_GPUVIRT_GUESTDRV) && !defined(SUPPORT_TRUSTED_DEVICE)
+ RGXFWIF_DEV_VIRTADDR sFWAddr;
+ PVRSRV_ERROR eError;
+
+#define VALIDATEFWIMAGEWITHSP_NUM_CHECKS (1U)
+ static IMG_UINT32 ui32NumChecks = 0;
+
+ if (ui32NumChecks == VALIDATEFWIMAGEWITHSP_NUM_CHECKS)
+ {
+ return PVRSRV_OK;
+ }
+ ui32NumChecks++;
+
+ if (psDevInfo->pvRegsBaseKM == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ValidateFWImageWithSP: RGX registers not mapped yet!"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+ eError = _ValidateFWImageWithSP(psDevInfo,
+ psDevInfo->psRGXFWCodeMemDesc,
+ &sFWAddr,
+ "FW code");
+ if (eError != PVRSRV_OK) return eError;
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+ if (0 != psDevInfo->sDevFeatureCfg.ui32MCMS)
+ {
+ RGXSetFirmwareAddress(&sFWAddr,
+ psDevInfo->psRGXFWCorememMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = _ValidateFWImageWithSP(psDevInfo,
+ psDevInfo->psRGXFWCorememMemDesc,
+ &sFWAddr,
+ "FW coremem code");
+ if (eError != PVRSRV_OK) return eError;
+ }
+#endif
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+
+ return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+
+
+/*
+ Guest drivers have the following limitations:
+ - Cannot perform general device management (including debug)
+ - Cannot touch the hardware except the OSID kick registers
+ - Do not support the Firmware Trace log
+*/
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32VerbLevel)
+{
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32VerbLevel);
+}
+
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+}
+#else
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodePMPC
+
+ @Description
+
+ Return the name for the PM managed Page Catalogues
+
+ @Input ui32PC - Page Catalogue number
+
+ @Return IMG_CHAR* String describing the PM-managed Page Catalogue
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+ IMG_CHAR* pszPMPC = " (-)";
+
+ switch (ui32PC)
+ {
+ case 0x8: pszPMPC = " (PM-VCE0)"; break;
+ case 0x9: pszPMPC = " (PM-TE0)"; break;
+ case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+ case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+ case 0xC: pszPMPC = " (PM-VCE1)"; break;
+ case 0xD: pszPMPC = " (PM-TE1)"; break;
+ case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+ case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+ }
+
+ return pszPMPC;
+}
+
+/*!
+*******************************************************************************
+
+ @Function _DPXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from DPX_CR_BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID - BIF identifier
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+static void _DPXDecodeBIFReqTags(RGXDBG_BIF_ID eBankID,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ /* default to unknown */
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(eBankID == RGXDBG_DPX_BIF);
+ PVR_ASSERT(ppszTagID != NULL);
+
+ PVR_UNREFERENCED_PARAMETER(ui32TagSB);
+ PVR_UNREFERENCED_PARAMETER(pszScratchBuf);
+ PVR_UNREFERENCED_PARAMETER(ui32ScratchBufSize);
+
+ switch (ui32TagID)
+ {
+ case 0x0:
+ {
+ pszTagID = "MMU";
+ break;
+ }
+ case 0x1:
+ {
+ pszTagID = "RS_READ";
+ break;
+ }
+ case 0x2:
+ {
+ pszTagID = "RS_WRITE";
+ break;
+ }
+ case 0x3:
+ {
+ pszTagID = "RQ";
+ break;
+ }
+ case 0x4:
+ {
+ pszTagID = "PU";
+ break;
+ }
+ } /* switch(TagID) */
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID - BIF identifier
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXDBG_BIF_ID eBankID,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ /* default to unknown */
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(ppszTagID != NULL);
+ PVR_ASSERT(ppszTagSB != NULL);
+
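+	/*
+	 * The meaning of the tag ID and sideband fields depends on which BIF
+	 * raised the fault and on the core configuration (ray tracing,
+	 * cluster grouping, MIPS vs META), so the decode below branches on
+	 * the device feature masks.
+	 */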
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_DPX_BIF))
+ {
+ _DPXDecodeBIFReqTags(eBankID, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+ return;
+ }
+
+ switch (ui32TagID)
+ {
+ case 0x0:
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (eBankID == RGXDBG_BIF0)
+ {
+ pszTagID = "VRDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "SHF State"; break;
+ case 0x2: pszTagSB = "Index Data"; break;
+ case 0x4: pszTagSB = "Call Stack"; break;
+ case 0x8: pszTagSB = "Context State"; break;
+ }
+ }
+ else
+ {
+ pszTagID = "MMU";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Table"; break;
+ case 0x1: pszTagSB = "Directory"; break;
+ case 0x2: pszTagSB = "Catalogue"; break;
+ }
+ }
+ }else
+ {
+ pszTagID = "MMU";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Table"; break;
+ case 0x1: pszTagSB = "Directory"; break;
+ case 0x2: pszTagSB = "Catalogue"; break;
+ }
+ }
+ break;
+ }
+ case 0x1:
+ {
+ pszTagID = "TLA";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Pixel data"; break;
+ case 0x1: pszTagSB = "Command stream data"; break;
+ case 0x2: pszTagSB = "Fence or flush"; break;
+ }
+ break;
+ }
+ case 0x2:
+ {
+ if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_BIF0))
+ {
+ pszTagID = "SHF";
+ }else
+ {
+ pszTagID = "HOST";
+ }
+ break;
+ }
+ case 0x3:
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (eBankID == RGXDBG_BIF0)
+ {
+ pszTagID = "SHG";
+ }
+ }
+ else if (0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ pszTagID = "META";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "DCache - Thread 0"; break;
+ case 0x1: pszTagSB = "ICache - Thread 0"; break;
+ case 0x2: pszTagSB = "JTag - Thread 0"; break;
+ case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+					case 0x4: pszTagSB = "DCache - Thread 1"; break;
+ case 0x5: pszTagSB = "ICache - Thread 1"; break;
+ case 0x6: pszTagSB = "JTag - Thread 1"; break;
+ case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+ }
+ }
+ else if (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_57596_BIT_MASK)
+ {
+ pszTagID="TCU";
+ }
+ else
+ {
+ /* Unreachable code */
+ PVR_ASSERT(IMG_FALSE);
+ }
+ break;
+ }
+ case 0x4:
+ {
+ pszTagID = "USC";
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "Cache line %d", (ui32TagSB & 0x3f));
+ pszTagSB = pszScratchBuf;
+ break;
+ }
+ case 0x5:
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "PBE";
+ }
+ else
+ {
+ pszTagID = "RPM";
+ }
+ }else{
+ pszTagID = "PBE";
+ }
+			}else
+			{
+				pszTagID = "PBE";
+			}
+ break;
+ }
+ case 0x6:
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+ }else
+ {
+ pszTagID = "FBA";
+ }
+ }else
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+ }
+ }else
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+ }
+ break;
+ }
+ case 0x7:
+ {
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "IPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "CPF"; break;
+ case 0x1: pszTagSB = "DBSC"; break;
+ case 0x2:
+ case 0x4:
+ case 0x6:
+ case 0x8: pszTagSB = "Control Stream"; break;
+ case 0x3:
+ case 0x5:
+ case 0x7:
+ case 0x9: pszTagSB = "Primitive Block"; break;
+ }
+ }
+ else
+ {
+ pszTagID = "IPP";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ }
+ }
+ }else
+ {
+ pszTagID = "IPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ case 0x2: pszTagSB = "DBSC"; break;
+ case 0x3: pszTagSB = "CPF"; break;
+ case 0x4:
+ case 0x6:
+ case 0x8: pszTagSB = "Control Stream"; break;
+ case 0x5:
+ case 0x7:
+ case 0x9: pszTagSB = "Primitive Block"; break;
+ }
+ }
+ break;
+ }
+ case 0x8:
+ {
+ pszTagID = "CDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "Indirect Data"; break;
+ case 0x2: pszTagSB = "Event Write"; break;
+ case 0x3: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+ case 0x9:
+ {
+ pszTagID = "VDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "PPP State"; break;
+ case 0x2: pszTagSB = "Index Data"; break;
+ case 0x4: pszTagSB = "Call Stack"; break;
+ case 0x8: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+ case 0xA:
+ {
+ pszTagID = "PM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+ case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+ case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+ case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+ case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+ case 0x6: pszTagSB = "PMA_MAVP"; break;
+ case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+ case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+ case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+ case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+ case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+ case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+ case 0x18: pszTagSB = "PMA_TAVFP"; break;
+ case 0x19: pszTagSB = "PMD_3DVFP"; break;
+ case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+ }
+ break;
+ }
+ case 0xB:
+ {
+ pszTagID = "TA";
+ switch (ui32TagSB)
+ {
+ case 0x1: pszTagSB = "VCE"; break;
+ case 0x2: pszTagSB = "TPC"; break;
+ case 0x3: pszTagSB = "TE Control Stream"; break;
+ case 0x4: pszTagSB = "TE Region Header"; break;
+ case 0x5: pszTagSB = "TE Render Target Cache"; break;
+ case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+ case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+ case 0x8: pszTagSB = "PPP Context State"; break;
+ }
+ break;
+ }
+ case 0xC:
+ {
+ pszTagID = "TPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+ case 0x3: pszTagSB = "CPF - Tables"; break;
+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+ }
+ break;
+ }
+ case 0xD:
+ {
+ pszTagID = "PDS";
+ break;
+ }
+ case 0xE:
+ {
+ pszTagID = "MCU";
+ {
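+				/*
+				 * MCU sideband layout as decoded below: bits [7:5]
+				 * burst type, bits [4:2] requesting group encoding,
+				 * bits [1:0] bank group.
+				 */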
+ IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+ IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+ IMG_CHAR* pszBurst = "";
+ IMG_CHAR* pszGroupEnc = "";
+ IMG_CHAR* pszGroup = "";
+
+ switch (ui32Burst)
+ {
+ case 0x0:
+ case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+ case 0x2:
+ case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+ case 0x4: pszBurst = "Lower 256bits"; break;
+ case 0x5: pszBurst = "Upper 256bits"; break;
+ case 0x6: pszBurst = "512 bits"; break;
+ }
+ switch (ui32GroupEnc)
+ {
+ case 0x0: pszGroupEnc = "TPUA_USC"; break;
+ case 0x1: pszGroupEnc = "TPUB_USC"; break;
+ case 0x2: pszGroupEnc = "USCA_USC"; break;
+ case 0x3: pszGroupEnc = "USCB_USC"; break;
+ case 0x4: pszGroupEnc = "PDS_USC"; break;
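+					/*
+					 * The 0x5-0x7 encodings depend on the cluster
+					 * count; configurations not handled below fall
+					 * through and leave pszGroupEnc empty.
+					 */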
+ case 0x5:
+ if(6 > psDevInfo->sDevFeatureCfg.ui32NumClusters)
+ {
+ pszGroupEnc = "PDSRW"; break;
+ }else if(6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+ {
+ pszGroupEnc = "UPUC_USC"; break;
+ }
+ case 0x6:
+ if(6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+ {
+ pszGroupEnc = "TPUC_USC"; break;
+ }
+ case 0x7:
+ if(6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+ {
+ pszGroupEnc = "PDSRW"; break;
+ }
+ }
+ switch (ui32Group)
+ {
+ case 0x0: pszGroup = "Banks 0-3"; break;
+ case 0x1: pszGroup = "Banks 4-7"; break;
+ case 0x2: pszGroup = "Banks 8-11"; break;
+ case 0x3: pszGroup = "Banks 12-15"; break;
+ }
+
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+ pszTagSB = pszScratchBuf;
+ }
+ break;
+ }
+ case 0xF:
+ {
+ pszTagID = "FB_CDC";
+
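+			/*
+			 * The FB_CDC sideband layout differs with the XT top
+			 * infrastructure: request type in bits [3:0] and MCU
+			 * sideband in bits [5:4], versus request type in bits
+			 * [4:2] and MCU sideband in bits [1:0] otherwise.
+			 */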
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ IMG_UINT32 ui32Req = (ui32TagSB >> 0) & 0xf;
+ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+ IMG_CHAR* pszReqOrig = "";
+
+ switch (ui32Req)
+ {
+ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+ case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+ case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+ case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+ case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+ case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+ case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+ case 0xc: pszReqOrig = "Reserved"; break;
+ case 0xd: pszReqOrig = "Reserved"; break;
+ case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+ case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+ }
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+ pszTagSB = pszScratchBuf;
+ }
+ else
+ {
+ IMG_UINT32 ui32Req = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+ IMG_CHAR* pszReqOrig = "";
+
+ switch (ui32Req)
+ {
+ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+ case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+ case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+ }
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+ pszTagSB = pszScratchBuf;
+ }
+ break;
+ }
+ } /* switch(TagID) */
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel - MMU level
+
+ @Return IMG_CHAR* String describing the MMU level that faulted.
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+ IMG_CHAR* pszMMULevel = "";
+
+ switch (ui32MMULevel)
+ {
+ case 0x0: pszMMULevel = " (Page Table)"; break;
+ case 0x1: pszMMULevel = " (Page Directory)"; break;
+ case 0x2: pszMMULevel = " (Page Catalog)"; break;
+ case 0x3: pszMMULevel = " (Cat Base)"; break;
+ }
+
+ return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Input bRead - Read flag
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_BOOL bRead,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ IMG_INT32 i32SideBandType = -1;
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(ppszTagID != NULL);
+ PVR_ASSERT(ppszTagSB != NULL);
+
+
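+	/*
+	 * The decode happens in two steps: first map the raw tag ID onto a
+	 * requester name and a sideband type, then decode the sideband data
+	 * according to that type. Some tag IDs are remapped further below
+	 * depending on the FBCDC arrangement of the core.
+	 */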
+ switch (ui32TagID)
+ {
+ case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+ case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+ case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+ case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+ case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+ case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+ case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+ case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+ case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+ case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+ case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+ case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+ case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+ case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+ case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+ case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+ case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+ case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+ case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+ case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+ case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+ case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+ case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+ case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+ case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+ case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+ case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+ case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+ case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+ case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+ case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+ case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+ case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+ case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+ case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+ case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+ }
+	if(('-' == pszTagID[0]) && ('\0' == pszTagID[1]))
+ {
+
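+		/*
+		 * Tag ID not covered by the common table above: the remaining
+		 * IDs belong to PBE/PDS/FBCDC/VCE units whose numbering depends
+		 * on ERN 50539 and the FBCDC architecture version.
+		 */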
+ if((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_50539_BIT_MASK) || \
+ (psDevInfo->sDevFeatureCfg.ui32FBCDCArch >= 3))
+ {
+ switch(ui32TagID)
+ {
+ case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_50539_BIT_MASK)
+ {
+ switch(ui32TagID)
+ {
+ case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+ }
+ }else
+ {
+ switch(ui32TagID)
+ {
+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+ }
+ }
+ }else
+ {
+ switch(ui32TagID)
+ {
+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+ case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+ case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+ case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+ case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ }
+ }
+
+ }
+
+ switch (i32SideBandType)
+ {
+ case RGXDBG_META:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "DCache - Thread 0"; break;
+ case 0x1: pszTagSB = "ICache - Thread 0"; break;
+ case 0x2: pszTagSB = "JTag - Thread 0"; break;
+ case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+ case 0x4: pszTagSB = "DCache - Thread 1"; break;
+ case 0x5: pszTagSB = "ICache - Thread 1"; break;
+ case 0x6: pszTagSB = "JTag - Thread 1"; break;
+ case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TLA:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Pixel data"; break;
+ case 0x1: pszTagSB = "Command stream data"; break;
+ case 0x2: pszTagSB = "Fence or flush"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_VDMM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+ case 0x1: pszTagSB = "PPP State - Read Only"; break;
+ case 0x2: pszTagSB = "Indices - Read Only"; break;
+ case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+ case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+ case 0xA: pszTagSB = "Context State - Write Only"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_CDM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "Indirect Data"; break;
+ case 0x2: pszTagSB = "Event Write"; break;
+ case 0x3: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_IPP:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_PM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+ case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+ case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+ case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+ case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+ case 0x6: pszTagSB = "PMA_MAVP"; break;
+ case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+ case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+ case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+ case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+ case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+ case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+ case 0x18: pszTagSB = "PMA_TAVFP"; break;
+ case 0x19: pszTagSB = "PMD_3DVFP"; break;
+ case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TILING:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+ case 0x1: pszTagSB = "TPC TP0"; break;
+ case 0x2: pszTagSB = "VCE0"; break;
+ case 0x3: pszTagSB = "VCE1"; break;
+ case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+ case 0x5: pszTagSB = "TPC TP1"; break;
+ case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+ case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_VDMS:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Context State - Write Only"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_IPF:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x00:
+ case 0x20: pszTagSB = "CPF"; break;
+ case 0x01: pszTagSB = "DBSC"; break;
+ case 0x02:
+ case 0x04:
+ case 0x06:
+ case 0x08:
+ case 0x0A:
+ case 0x0C:
+ case 0x0E:
+ case 0x10: pszTagSB = "Control Stream"; break;
+ case 0x03:
+ case 0x05:
+ case 0x07:
+ case 0x09:
+ case 0x0B:
+ case 0x0D:
+ case 0x0F:
+ case 0x11: pszTagSB = "Primitive Block"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_ISP:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS read/write"; break;
+ case 0x20: pszTagSB = "Occlusion query read/write"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TPF:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+ case 0x3: pszTagSB = "CPF - Tables"; break;
+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_FBCDC:
+ {
+ /*
+			 * FBC faults on a 4-cluster phantom do not always set SB
+			 * bit 5, but since FBC is write-only and FBDC is read-only,
+			 * we can set bit 5 for write faults before decoding.
+ */
+ if (bRead == IMG_FALSE)
+ {
+ ui32TagSB |= 0x20;
+ }
+
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break;
+ case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break;
+ case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break;
+ case 0x20: pszTagSB = "FBC Request, originator ZLS"; break;
+ case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break;
+ case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break;
+ case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break;
+ case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break;
+ case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break;
+ case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break;
+ case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break;
+ case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break;
+ case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_MCU:
+ {
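+			/*
+			 * MMU MCU sideband layout as decoded below: bits [7:5]
+			 * set number, bits [4:2] way number, bits [1:0] bank group.
+			 */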
+ IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+ IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+ IMG_CHAR* pszGroup = "";
+
+ switch (ui32Group)
+ {
+ case 0x0: pszGroup = "Banks 0-1"; break;
+ case 0x1: pszGroup = "Banks 2-3"; break;
+ case 0x2: pszGroup = "Banks 4-5"; break;
+ case 0x3: pszGroup = "Banks 6-7"; break;
+ }
+
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+ pszTagSB = pszScratchBuf;
+ break;
+ }
+
+ default:
+ {
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+ pszTagSB = pszScratchBuf;
+ break;
+ }
+ }
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
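+/*
+ * Split a nanosecond OS timestamp into whole seconds plus the remaining
+ * nanoseconds, for printing as "seconds.nanoseconds".
+ */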
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+ IMG_UINT64 *pui64Seconds,
+ IMG_UINT64 *pui64Nanoseconds)
+{
+ IMG_UINT32 ui32Remainder;
+
+ *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+ *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
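+/*
+ * Indices of the DevicememHistory queries issued for a page fault: one for
+ * the page preceding the faulting address, one for the faulting page itself
+ * and one for the page after it.
+ */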
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+ DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+ DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+ DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+ DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+/*!
+*******************************************************************************
+
+ @Function _PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf - Debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psFaultProcessInfo - The process info derived from the page fault
+ @Input psResult - The DevicememHistory result to be printed
+ @Input ui32Index - The index of the result
+
+ @Return void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+ IMG_UINT32 ui32Index)
+{
+ IMG_UINT32 ui32Remainder;
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ ConvertOSTimestampToSAndNS(psResult->ui64When,
+ &ui64Seconds,
+ &ui64Nanoseconds);
+
+ if(psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ PVR_DUMPDEBUG_LOG(" [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+				" Operation: %s Modified: %llu us ago (OS time %llu.%09llu)",
+ ui32Index,
+ psResult->szString,
+ (unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+ (unsigned long long) psResult->uiSize,
+ psResult->bMap ? "Map": "Unmap",
+ (unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+ " Operation: %s Modified: %llu us ago (OS time %llu.%09llu) PID: %u (%s)",
+ ui32Index,
+ psResult->szString,
+ (unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+ (unsigned long long) psResult->uiSize,
+ psResult->bMap ? "Map": "Unmap",
+ (unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds,
+ (unsigned int) psResult->sProcessInfo.uiPID,
+ psResult->sProcessInfo.szProcessName);
+ }
+
+ if(!psResult->bRange)
+ {
+ PVR_DUMPDEBUG_LOG(" Whole allocation was %s", psResult->bMap ? "mapped": "unmapped");
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+ psResult->ui32StartPage,
+ psResult->ui32StartPage + psResult->ui32PageCount - 1,
+ psResult->sMapStartAddr.uiAddr,
+ psResult->sMapEndAddr.uiAddr,
+ psResult->bAll ? "(whole allocation) " : "",
+ psResult->bMap ? "mapped": "unmapped");
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf - Debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psFaultProcessInfo - The process info derived from the page fault
+ @Input psQueryOut - Storage for the query results
+
+ @Return void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+ IMG_UINT32 i;
+
+ if(psQueryOut->ui32NumResults == 0)
+ {
+ PVR_DUMPDEBUG_LOG(" No results");
+ }
+ else
+ {
+ for(i = 0; i < psQueryOut->ui32NumResults; i++)
+ {
+ _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+ psFaultProcessInfo,
+ &psQueryOut->sResults[i],
+ i);
+ }
+ }
+}
+
+/* table of HW page size field values and the equivalent page sizes in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+ { 0, PVRSRV_4K_PAGE_SIZE },
+ { 1, PVRSRV_16K_PAGE_SIZE },
+ { 2, PVRSRV_64K_PAGE_SIZE },
+ { 3, PVRSRV_256K_PAGE_SIZE },
+ { 4, PVRSRV_1M_PAGE_SIZE },
+ { 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function _PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW - The HW page size value
+
+ @Return IMG_UINT32 The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+ if (ui32PageSizeHW > 5)
+ {
+ /* This is invalid, so return a default value as we cannot ASSERT in this code! */
+ return PVRSRV_4K_PAGE_SIZE;
+ }
+
+ return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
+
+/*!
+*******************************************************************************
+
+ @Function _GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr - The device address to search for allocations at/before/after
+ @Input asQueryOut - Storage for the query results
+ @Input ui32PageSizeBytes - Faulted page size in bytes
+
+ @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+ IMG_UINT32 ui32PageSizeBytes)
+{
+ IMG_UINT32 i;
+ DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+ IMG_BOOL bAnyHits = IMG_FALSE;
+
+ /* if the page fault originated in the firmware then the allocation may
+ * appear to belong to any PID, because FW allocations are attributed
+ * to the client process creating the allocation, so instruct the
+ * devicemem_history query to search all available PIDs
+ */
+ if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+ }
+ else
+ {
+ sQueryIn.uiPID = uiPID;
+ }
+
+ /* query the DevicememHistory about the preceding / faulting / next page */
+
+ for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ IMG_BOOL bHits;
+
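+		/*
+		 * Select the query address: round the faulting address down to
+		 * its page boundary, then step one byte back for the preceding
+		 * page or one page forward for the next page.
+		 */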
+ switch(i)
+ {
+ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1;
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+ sQueryIn.sDevVAddr = sFaultDevVAddr;
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+ break;
+ }
+
+ /* First try matching any record at the exact address... */
+ bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE);
+ if (!bHits)
+ {
+ /* If not matched then try matching any record in the same page... */
+ bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE);
+ }
+
+ if(bHits)
+ {
+ bAnyHits = IMG_TRUE;
+ }
+ }
+
+ return bAnyHits;
+}
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+ /* the process info of the memory context that page faulted */
+ RGXMEM_PROCESS_INFO sProcessInfo;
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* The CR timer value at the time of the fault, recorded by the FW;
+	 * used to distinguish between different page faults.
+	 */
+ IMG_UINT64 ui64CRTimer;
+	/* Time when this FAULT_INFO entry was added; used as a timing
+	 * reference against the map/unmap information.
+	 */
+ IMG_UINT64 ui64When;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+ IMG_UINT32 ui32Head;
+ IMG_UINT32 ui32NumWrites;
+	/* The number of faults in this log need not correspond exactly to
+	 * the number of entries in the FW HWINFO log, as that log may also
+	 * contain HWRs that are not page faults.
+	 */
+ FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
+/*!
+*******************************************************************************
+
+ @Function _QueryFaultInfo
+
+ @Description
+
+ Searches the local list of previously analysed page faults to see if the given
+ fault has already been analysed and if so, returns a pointer to the analysis
+ object (FAULT_INFO *), otherwise returns NULL.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input sFaultDevVAddr - The faulting device virtual address
+ @Input ui64CRTimer - The CR timer value recorded by the FW at the time of the fault
+
+ @Return FAULT_INFO* Pointer to an existing fault analysis structure if found, otherwise NULL
+
+******************************************************************************/
+static FAULT_INFO *_QueryFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
+ IMG_UINT64 ui64CRTimer)
+{
+ IMG_UINT32 i;
+
+ for(i = 0; i < MIN(gsFaultInfoLog.ui32NumWrites, RGXFWIF_HWINFO_MAX); i++)
+ {
+ if((gsFaultInfoLog.asFaults[i].ui64CRTimer == ui64CRTimer) &&
+ (gsFaultInfoLog.asFaults[i].sFaultDevVAddr.uiAddr == sFaultDevVAddr.uiAddr))
+ {
+ return &gsFaultInfoLog.asFaults[i];
+ }
+ }
+
+ return NULL;
+}
+
+/*!
+*******************************************************************************
+
+ @Function _AcquireNextFaultInfoElement
+
+ @Description
+
+ Gets a pointer to the next element in the fault info log
+ (requires the fault info lock to be held)
+
+
+ @Return FAULT_INFO* Pointer to the next record for writing
+
+******************************************************************************/
+
+static FAULT_INFO *_AcquireNextFaultInfoElement(void)
+{
+ IMG_UINT32 ui32Head = gsFaultInfoLog.ui32Head;
+ FAULT_INFO *psInfo = &gsFaultInfoLog.asFaults[ui32Head];
+
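+	/* The head index is only advanced when the element is committed via
+	 * _CommitFaultInfo(); an element that is never committed is simply
+	 * reused for the next fault.
+	 */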
+ return psInfo;
+}
+
+static void _CommitFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ FAULT_INFO *psInfo,
+ RGXMEM_PROCESS_INFO *psProcessInfo,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
+ IMG_UINT64 ui64CRTimer)
+{
+ IMG_UINT32 i, j;
+
+ /* commit the page fault details */
+
+ psInfo->sProcessInfo = *psProcessInfo;
+ psInfo->sFaultDevVAddr = sFaultDevVAddr;
+ psInfo->ui64CRTimer = ui64CRTimer;
+ psInfo->ui64When = OSClockns64();
+
+ /* if the page fault was caused by the firmware then get information about
+ * which client application created the related allocations.
+ *
+ * Fill in the process info data for each query result.
+ */
+
+ if(psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ for(i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ for(j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+ {
+ IMG_BOOL bFound;
+
+ RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+ bFound = RGXPCPIDToProcessInfo(psDevInfo,
+ psProcInfo->uiPID,
+ psProcInfo);
+ if(!bFound)
+ {
+ OSStringNCopy(psProcInfo->szProcessName,
+ "(unknown)",
+ sizeof(psProcInfo->szProcessName) - 1);
+ psProcInfo->szProcessName[sizeof(psProcInfo->szProcessName) - 1] = '\0';
+ }
+ }
+ }
+ }
+
+	/* Assert that the head of the fault log has not moved since this
+	 * element was acquired, then advance the head.
+	 */
+
+ PVR_ASSERT(psInfo == &gsFaultInfoLog.asFaults[gsFaultInfoLog.ui32Head]);
+
+ if(gsFaultInfoLog.ui32Head < RGXFWIF_HWINFO_MAX - 1)
+ {
+ gsFaultInfoLog.ui32Head++;
+ }
+ else
+ {
+ /* wrap back to the first of the 'LAST' entries */
+ gsFaultInfoLog.ui32Head = RGXFWIF_HWINFO_MAX_FIRST;
+ }
+
+ gsFaultInfoLog.ui32NumWrites++;
+
+
+}
+
+/*!
+*******************************************************************************
+
+ @Function _PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psInfo - The page fault occurrence to print
+ @Input pui32Index - (optional) index value to include in the print output
+
+ @Return void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ FAULT_INFO *psInfo,
+ const IMG_UINT32 *pui32Index)
+{
+ IMG_UINT32 i;
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ IMG_PID uiPID;
+
+ uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) ? 0 : psInfo->sProcessInfo.uiPID;
+
+ ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+ if(pui32Index)
+ {
+ PVR_DUMPDEBUG_LOG("(%u) Device memory history for page fault address 0x%010llX, CRTimer: 0x%016llX, "
+ "PID: %u (%s, unregistered: %u) OS time: %llu.%09llu",
+ *pui32Index,
+ (unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+ psInfo->ui64CRTimer,
+ (unsigned int) uiPID,
+ psInfo->sProcessInfo.szProcessName,
+ psInfo->sProcessInfo.bUnregistered,
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Device memory history for page fault address 0x%010llX, PID: %u "
+ "(%s, unregistered: %u) OS time: %llu.%09llu",
+ (unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+ (unsigned int) uiPID,
+ psInfo->sProcessInfo.szProcessName,
+ psInfo->sProcessInfo.bUnregistered,
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+
+ for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ const IMG_CHAR *pszWhich;
+
+ switch(i)
+ {
+ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+ pszWhich = "Preceding page";
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+ pszWhich = "Faulted page";
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+ pszWhich = "Next page";
+ break;
+ }
+
+ PVR_DUMPDEBUG_LOG("%s:", pszWhich);
+ _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+ &psInfo->sProcessInfo,
+ &psInfo->asQueryOut[i]);
+ }
+}
+
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input eBankID - BIF identifier
+ @Input ui64MMUStatus - MMU Status register value
+ @Input ui64ReqStatus - BIF request Status register value
+ @Input ui64PCAddress - Page catalogue base address of faulting access
+ @Input ui64CRTimer - RGX CR timer value at time of page fault
+ @Input bSummary - Flag indicating whether the function is called
+                   as part of the debug dump summary or
+                   as part of a HWR log dump
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXDBG_BIF_ID eBankID,
+ IMG_UINT64 ui64MMUStatus,
+ IMG_UINT64 ui64ReqStatus,
+ IMG_UINT64 ui64PCAddress,
+ IMG_UINT64 ui64CRTimer,
+ IMG_BOOL bSummary)
+{
+ IMG_CHAR *pszIndent = (bSummary ? "" : " ");
+
+ if (ui64MMUStatus == 0x0)
+ {
+ PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]);
+ }
+ else
+ {
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ IMG_BOOL bFound = IMG_FALSE;
+ RGXMEM_PROCESS_INFO sProcessInfo;
+ IMG_UINT32 ui32PageSizeBytes;
+ FAULT_INFO *psInfo;
+#endif
+ /* Bank 0 & 1 share the same fields */
+ PVR_DUMPDEBUG_LOG("%s%s - FAULT:",
+ pszIndent,
+ pszBIFNames[eBankID]);
+
+ /* MMU Status */
+ {
+ IMG_UINT32 ui32PC =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+ IMG_UINT32 ui32PageSize =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+ IMG_UINT32 ui32MMUDataType =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+ IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+ IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ ui32PageSizeBytes = _PageSizeHWToBytes(ui32PageSize);
+#endif
+
+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016llX): PC = %d%s, Page Size = %d, MMU data type = %d%s%s.",
+ pszIndent,
+ ui64MMUStatus,
+ ui32PC,
+ (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+ ui32PageSize,
+ ui32MMUDataType,
+ (bROFault)?", Read Only fault":"",
+ (bProtFault)?", PM/META protection fault":"");
+ }
+
+ /* Req Status */
+ {
+ IMG_CHAR *pszTagID;
+ IMG_CHAR *pszTagSB;
+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+
+ IMG_BOOL bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+ IMG_UINT32 ui32TagSB =
+ (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+ IMG_UINT32 ui32TagID =
+ (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+ IMG_UINT64 ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) <<
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+
+ /* RNW bit offset is different. The TAG_SB, TAG_ID and address fields are the same. */
+ if( (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_DPX_BIF))
+ {
+ bRead = (ui64ReqStatus & DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN) != 0;
+ }
+
+ _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+ PVR_DUMPDEBUG_LOG("%s * Request (0x%016llX): %s (%s), %s 0x%010llX.",
+ pszIndent,
+ ui64ReqStatus,
+ pszTagID,
+ pszTagSB,
+ (bRead)?"Reading from":"Writing to",
+ ui64Addr);
+ }
+
+ /* Check if the host thinks this fault is valid */
+
+ sFaultDevVAddr.uiAddr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+ if (bSummary)
+ {
+ IMG_UINT32 ui32PC =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+ /* Only the first 8 cat bases are application memory contexts which we can validate... */
+ if (ui32PC < 8)
+ {
+ sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32PC));
+ PVR_DUMPDEBUG_LOG("%sAcquired live PC address: 0x%016llX", pszIndent, sPCDevPAddr.uiAddr);
+ }
+ else
+ {
+ sPCDevPAddr.uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016llX", pszIndent, ui64PCAddress);
+ sPCDevPAddr.uiAddr = ui64PCAddress;
+ }
+
+ if (bSummary)
+ {
+ PVR_DUMPDEBUG_LOG("%sChecking faulting address 0x%010llX", pszIndent, sFaultDevVAddr.uiAddr);
+ RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+ /* look to see if we have already processed this fault.
+ * if so then use the previously acquired information.
+ */
+ OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+ psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+ if(psInfo == NULL)
+ {
+ if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+ {
+ /* look up the process details for the faulting page catalogue */
+ bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+
+ if(bFound)
+ {
+ IMG_BOOL bHits;
+
+ psInfo = _AcquireNextFaultInfoElement();
+
+ /* get any DevicememHistory data for the faulting address */
+ bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+ sFaultDevVAddr,
+ psInfo->asQueryOut,
+ ui32PageSizeBytes);
+
+ if(bHits)
+ {
+ _CommitFaultInfo(psDevInfo,
+ psInfo,
+ &sProcessInfo,
+ sFaultDevVAddr,
+ ui64CRTimer);
+ }
+ else
+ {
+ /* no hits, so no data to present */
+ PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+ psInfo = NULL;
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016llX", pszIndent, sPCDevPAddr.uiAddr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History", pszIndent);
+ }
+ }
+
+ if(psInfo != NULL)
+ {
+ _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+ }
+
+ OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+#endif
+
+ }
+
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input ui64MMUStatus - MMU Status register value
+ @Input ui64PCAddress - Page catalogue base address of faulting access
+ @Input ui64CRTimer - RGX CR timer value at time of page fault
+ @Input bIsMetaMMUStatus - IMG_TRUE if the status comes from MMU_FAULT_STATUS_META, IMG_FALSE if from MMU_FAULT_STATUS
+ @Input bSummary - Flag indicating whether the function is called
+ as part of the debug dump summary or
+ as part of an HWR log entry
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT64 ui64MMUStatus,
+ IMG_UINT64 ui64PCAddress,
+ IMG_UINT64 ui64CRTimer,
+ IMG_BOOL bIsMetaMMUStatus,
+ IMG_BOOL bSummary)
+{
+ IMG_CHAR *pszMetaOrCore = (bIsMetaMMUStatus ? "Meta" : "Core");
+ IMG_CHAR *pszIndent = (bSummary ? "" : " ");
+
+ if (ui64MMUStatus == 0x0)
+ {
+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+ }
+ else
+ {
+ IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+ IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */
+ IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+ IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+ IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+ IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+ IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+ IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+ IMG_BOOL bProtFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+ IMG_CHAR *pszTagID;
+ IMG_CHAR *pszTagSB;
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ IMG_BOOL bFound = IMG_FALSE;
+ RGXMEM_PROCESS_INFO sProcessInfo;
+ IMG_UINT32 ui32PageSizeBytes = _PageSizeHWToBytes(0);
+ FAULT_INFO *psInfo;
+#endif
+
+ _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016llX): PC = %d, %s 0x%010llX, %s (%s)%s%s%s%s.",
+ pszIndent,
+ ui64MMUStatus,
+ ui32PC,
+ (bRead)?"Reading from":"Writing to",
+ ui64Addr,
+ pszTagID,
+ pszTagSB,
+ (bFault)?", Fault":"",
+ (bROFault)?", Read Only fault":"",
+ (bProtFault)?", PM/META protection fault":"",
+ _RGXDecodeMMULevel(ui32MMULevel));
+ /* Check if the host thinks this fault is valid */
+
+ sFaultDevVAddr.uiAddr = ui64Addr;
+
+ if (bSummary)
+ {
+ /*
+ * The first 7 or 8 cat bases are memory contexts used for PM
+ * or firmware. The rest are application contexts.
+ *
+ * It is not possible for the host to obtain the cat base
+ * address while the FW is running (since the cat bases are
+ * indirectly accessed), but in the case of the 'live' PC
+ * we can see if the FW has already logged it in the HWR log.
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ if (ui32PC > 7)
+#else
+ if (ui32PC > 6)
+#endif
+ {
+ IMG_UINT32 ui32LatestHWRNumber = 0;
+ IMG_UINT64 ui64LatestMMUStatus = 0;
+ IMG_UINT64 ui64LatestPCAddress = 0;
+ IMG_UINT32 ui32HWRIndex;
+
+ for (ui32HWRIndex = 0 ; ui32HWRIndex < RGXFWIF_HWINFO_MAX ; ui32HWRIndex++)
+ {
+ RGX_HWRINFO *psHWRInfo = &psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex];
+
+ if (psHWRInfo->ui32HWRNumber > ui32LatestHWRNumber &&
+ psHWRInfo->eHWRType == RGX_HWRTYPE_MMUFAULT)
+ {
+ ui32LatestHWRNumber = psHWRInfo->ui32HWRNumber;
+ ui64LatestMMUStatus = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus;
+ ui64LatestPCAddress = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+ }
+ }
+
+ if (ui64LatestMMUStatus == ui64MMUStatus && ui64LatestPCAddress != 0)
+ {
+ sPCDevPAddr.uiAddr = ui64LatestPCAddress;
+ PVR_DUMPDEBUG_LOG("%sLocated PC address: 0x%016llX", pszIndent, sPCDevPAddr.uiAddr);
+ }
+ }
+ else
+ {
+ sPCDevPAddr.uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016llX",
+ pszIndent, ui64PCAddress);
+ sPCDevPAddr.uiAddr = ui64PCAddress;
+ }
+
+ if (bSummary && sPCDevPAddr.uiAddr != 0)
+ {
+ PVR_DUMPDEBUG_LOG("%sChecking faulting address 0x%010llX",
+ pszIndent, sFaultDevVAddr.uiAddr);
+ RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ /* look to see if we have already processed this fault.
+ * if so then use the previously acquired information.
+ */
+ OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+ psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+ if(psInfo == NULL)
+ {
+ if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+ {
+ /* look up the process details for the faulting page catalogue */
+ bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+
+ if(bFound)
+ {
+ IMG_BOOL bHits;
+
+ psInfo = _AcquireNextFaultInfoElement();
+
+ /* get any DevicememHistory data for the faulting address */
+ bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+ sFaultDevVAddr,
+ psInfo->asQueryOut,
+ ui32PageSizeBytes);
+
+ if(bHits)
+ {
+ _CommitFaultInfo(psDevInfo,
+ psInfo,
+ &sProcessInfo,
+ sFaultDevVAddr,
+ ui64CRTimer);
+ }
+ else
+ {
+ /* no hits, so no data to present */
+ PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+ psInfo = NULL;
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016llX",
+ pszIndent, sPCDevPAddr.uiAddr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History",
+ pszIndent);
+ }
+ }
+
+ if(psInfo != NULL)
+ {
+ _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+ }
+
+ OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+#endif
+ }
+}
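+/* _RGXDumpRGXMMUFaultStatus() decodes both MMU_FAULT_STATUS and MMU_FAULT_STATUS_META
+ * using the non-META field definitions, so assert that the two register layouts match. */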
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+
+
+#if !defined(NO_HARDWARE)
+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_DEVICE_CONFIG *psDevConfig, RGX_MIPS_STATE *psMIPSState)
+{
+ void *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+ IMG_UINT32 ui32RegRead;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ /* This pointer contains a kernel mapping of a particular memory area shared
+ between the driver and the firmware. This area is used for exchanging info
+ about the internal state of the MIPS. */
+ IMG_UINT32 *pui32NMIMemoryPointer;
+ IMG_UINT32 *pui32NMIPageBasePointer;
+ IMG_BOOL bValid;
+ IMG_CPU_PHYADDR sCPUPhyAddrStart;
+ IMG_CPU_PHYADDR sCPUPhyAddrEnd;
+ PMR *psPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+
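+ /* NMI handshake with the MIPS firmware:
+ * 1) clear the sync flag in the shared NMI page and flush it,
+ * 2) enable and trigger an NMI in the MIPS wrapper,
+ * 3) wait for the NMI to be taken, then set the sync flag so the FW
+ * can run its NMI routine and dump its state into the shared page,
+ * 4) wait for the FW to leave the error level and copy the state out.
+ */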
+ /* Map the FW data area into the kernel address space */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+ (void **)&pui32NMIMemoryPointer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXMipsExtraDebug: Failed to acquire NMI shared memory area (%u)", eError));
+ goto map_error_fail;
+ }
+
+ eError = PMR_CpuPhysAddr(psPMR,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ &sCPUPhyAddrStart,
+ &bValid);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXMipsExtraDebug: PMR_CpuPhysAddr failed (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+ return eError;
+ }
+
+ sCPUPhyAddrEnd.uiAddr = sCPUPhyAddrStart.uiAddr + RGXMIPSFW_PAGE_SIZE;
+
+ /* Jump to the boot/NMI data page */
+ pui32NMIMemoryPointer += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+ pui32NMIPageBasePointer = pui32NMIMemoryPointer;
+
+ /* Jump to the NMI shared data area within the page above */
+ pui32NMIMemoryPointer += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
+
+ /* Acquire the NMI operations lock */
+ OSLockAcquire(psDevInfo->hNMILock);
+
+ /* Make sure the synchronization flag is set to 0 */
+ pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 0;
+
+ /* Flush out the dirty locations of the NMI page */
+ OSFlushCPUCacheRangeKM(PMR_DeviceNode(psPMR),
+ pui32NMIPageBasePointer,
+ pui32NMIPageBasePointer + RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+ sCPUPhyAddrStart,
+ sCPUPhyAddrEnd);
+
+ /* Enable NMI issuing in the MIPS wrapper */
+ OSWriteHWReg64(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN);
+
+ /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN))
+ {
+
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Issue NMI */
+ OSWriteHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_EVENT,
+ RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN);
+
+
+ /* Wait for NMI Taken to be asserted */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0)
+ {
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Allow the firmware to proceed */
+ pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 1;
+
+ /* Flush out the dirty locations of the NMI page */
+ OSFlushCPUCacheRangeKM(PMR_DeviceNode(psPMR),
+ pui32NMIPageBasePointer,
+ pui32NMIPageBasePointer + RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+ sCPUPhyAddrStart,
+ sCPUPhyAddrEnd);
+
+ /* Wait for the FW to have finished the NMI routine */
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN))
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)
+ {
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Copy state */
+ OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+
+ --(psMIPSState->ui32ErrorEPC);
+ --(psMIPSState->ui32EPC);
+
+ /* Disable NMI issuing in the MIPS wrapper */
+ OSWriteHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+ 0);
+
+fail:
+ /* Release the NMI operations lock */
+ OSLockRelease(psDevInfo->hNMILock);
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+map_error_fail:
+ return eError;
+}
+
+/* Print decoded information from cause register */
+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, IMG_UINT32 ui32Cause)
+{
+#define INDENT " "
+ const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+ const IMG_CHAR * const pszException = apszMIPSExcCodes[ui32ExcCode];
+
+ if (pszException != NULL)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
+ }
+
+ /* IP Bits */
+ {
+ IMG_UINT32 ui32HWIRQStatus = RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(ui32Cause);
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXMIPSFW_C0_NBHWIRQ; ++i)
+ {
+ if (ui32HWIRQStatus & (1 << i))
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Hardware interrupt %d pending", i);
+ /* Can there be more than one HW irq pending or should we break? */
+ }
+ }
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending");
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_IV)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses special interrupt vector");
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending");
+ }
+
+ /* Unusable Coproc exception */
+ if (ui32ExcCode == 11)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause));
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_TIPENDING)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Timer Interrupt pending");
+ }
+
+#undef INDENT
+}
+
+static void _RGXMipsDumpDebugDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, IMG_UINT32 ui32Debug, IMG_UINT32 ui32DEPC)
+{
+ const IMG_CHAR *pszDException = NULL;
+ IMG_UINT32 i;
+#define INDENT " "
+
+ if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM))
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Debug Mode is OFF");
+ return;
+ }
+
+ pszDException = apszMIPSExcCodes[RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)];
+
+ if (pszDException != NULL)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException);
+ }
+
+ for (i = 0; i < IMG_ARR_NUM_ELEMS(sMIPS_C0_DebugTable); ++i)
+ {
+ const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i];
+
+ if (ui32Debug & psDebugEntry->ui32Mask)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation);
+ }
+ }
+#undef INDENT
+ PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC);
+}
+
+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, const RGX_MIPS_TLB_ENTRY *psEntry, IMG_UINT32 ui32Index)
+{
+#define INDENT " "
+#define DUMP_TLB_LO(ENTRY_LO, ENTRY_NUM) \
+ PVR_DUMPDEBUG_LOG(INDENT "EntryLo" #ENTRY_NUM \
+ ":%s PFN = 0x%05X, %s%s", \
+ apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO)], \
+ RGXMIPSFW_TLB_GET_PFN(ENTRY_LO), \
+ apszCoherencyTBL[RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO)], \
+ apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(ENTRY_LO)])
+
+ static const IMG_CHAR * const apszPermissionInhibit[4] =
+ {
+ "",
+ " XI,",
+ " RI,",
+ " RI/XI,"
+ };
+
+ static const IMG_CHAR * const apszCoherencyTBL[8] =
+ {
+ "Cacheable",
+ "Cacheable",
+ "Uncached",
+ "Cacheable",
+ "Cacheable",
+ "Cacheable",
+ "Cacheable",
+ "Uncached"
+ };
+
+ static const IMG_CHAR * const apszDirtyGlobalValid[8] =
+ {
+ "",
+ ", V",
+ ", G",
+ ", GV",
+ ", D",
+ ", DV",
+ ", DG",
+ ", DGV"
+ };
+
+ PVR_DUMPDEBUG_LOG("Entry %u, Page Mask: 0x%04X, EntryHi: VPN2 = 0x%05X", ui32Index, RGXMIPSFW_TLB_GET_MASK(psEntry->ui32TLBPageMask),
+ RGXMIPSFW_TLB_GET_VPN2(psEntry->ui32TLBHi));
+
+ DUMP_TLB_LO(psEntry->ui32TLBLo0, 0);
+
+ DUMP_TLB_LO(psEntry->ui32TLBLo1, 1);
+
+#undef DUMP_TLB_LO
+}
+
+#endif /* !defined(NO_HARDWARE) */
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+ IMG_CHAR *pszTraceAssertPath;
+ IMG_CHAR *pszTraceAssertInfo;
+ IMG_UINT32 ui32TraceAssertLine;
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+ pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+ ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+ /* print non null assert strings */
+ if (*pszTraceAssertInfo)
+ {
+ PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
+ i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+ }
+ }
+}
+
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+ IMG_UINT32 i;
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[i])
+ {
+ PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+ i,
+ ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+ psRGXFWIfTraceBufCtl->aui32CrPollMask[i]);
+ }
+ }
+
+}
+
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_BOOL bAnyLocked = IMG_FALSE;
+ IMG_UINT32 dm, i;
+ IMG_UINT32 ui32LineSize;
+ IMG_CHAR *pszLine, *pszTemp;
+ IMG_CHAR *apszDmNames[] = { "GP(", "TDM(", "TA(", "3D(", "CDM(",
+ "RTU(", "SHG(", NULL };
+
+ const IMG_CHAR *pszMsgHeader = "Number of HWR: ";
+ IMG_CHAR *pszLockupType = "";
+ RGXFWIF_HWRINFOBUF *psHWInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+ RGX_HWRINFO *psHWRInfo;
+ IMG_UINT32 ui32MsgHeaderSize = OSStringLength(pszMsgHeader);
+ IMG_UINT32 ui32HWRRecoveryFlags;
+ IMG_UINT32 ui32ReadIndex;
+
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ apszDmNames[RGXFWIF_DM_TDM] = "2D(";
+ }
+
+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm] ||
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm])
+ {
+ bAnyLocked = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (!bAnyLocked && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK))
+ {
+ /* No HWR situation, print nothing */
+ return;
+ }
+
+ ui32LineSize = sizeof(IMG_CHAR) * ( ui32MsgHeaderSize +
+ (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ +
+ 10/*UINT32 max num of digits*/ +
+ 1/*slash*/ +
+ 10/*UINT32 max num of digits*/ +
+ 3/*right parenthesis + comma + space*/)) +
+ 7 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6)/* FALSE() + (UINT16 max num + comma) per DM */ +
+ 1/* \0 */);
+
+ pszLine = OSAllocMem(ui32LineSize);
+ if (pszLine == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXDumpRGXDebugSummary: Out of mem allocating line string (size: %d)", ui32LineSize));
+ return;
+ }
+
+ OSStringCopy(pszLine,pszMsgHeader);
+ pszTemp = pszLine + ui32MsgHeaderSize;
+
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ OSStringCopy(pszTemp,apszDmNames[dm]);
+ pszTemp += OSStringLength(apszDmNames[dm]);
+ pszTemp += OSSNPrintf(pszTemp,
+ 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 /* UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+ "%u/%u+%u), ",
+ psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm],
+ psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm],
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm]);
+ }
+
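+ /* Append the per-DM false lockup detection counts as "FALSE(n,n,...)" */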
+ OSStringCopy(pszTemp, "FALSE(");
+ pszTemp += 6;
+
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ pszTemp += OSSNPrintf(pszTemp,
+ 10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+ (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"),
+ psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[dm]);
+ }
+
+ PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+ OSFreeMem(pszLine);
+
+ /* Print out per HWR info */
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ if (dm == RGXFWIF_DM_GP)
+ {
+ PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x)", dm, psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm]);
+ }
+
+ ui32ReadIndex = 0;
+ for(i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+ {
+ psHWRInfo = &psHWInfoBuf->sHWRInfo[ui32ReadIndex];
+
+ if((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+ {
+ IMG_CHAR aui8RecoveryNum[10+10+1];
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ /* Split OS timestamp in seconds and nanoseconds */
+ ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+ ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+ if(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Innocent Overrun"; }
+
+ OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+ PVR_DUMPDEBUG_LOG(" %s PID = %d, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ aui8RecoveryNum,
+ psHWRInfo->ui32PID,
+ psHWRInfo->ui32FrameNum,
+ psHWRInfo->ui32ActiveHWRTData,
+ psHWRInfo->ui32EventStatus,
+ pszLockupType);
+ pszTemp = &aui8RecoveryNum[0];
+ while (*pszTemp != '\0')
+ {
+ *pszTemp++ = ' ';
+ }
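+ /* The RGX CR timer ticks once every 256 GPU clock cycles, hence the
+ "* 256" scaling below when converting CR timer deltas to cycles. */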
+ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012llX, OSTimer = %llu.%09llu, CyclesElapsed = %lld",
+ aui8RecoveryNum,
+ psHWRInfo->ui64CRTimer,
+ ui64Seconds,
+ ui64Nanoseconds,
+ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+ if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+ {
+ if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+ {
+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %lld, HWResetTimeInCycles = %lld, FreelistReconTimeInCycles = %lld, TotalRecoveryTimeInCycles = %lld",
+ aui8RecoveryNum,
+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %lld, HWResetTimeInCycles = %lld, TotalRecoveryTimeInCycles = %lld",
+ aui8RecoveryNum,
+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+ }
+ }
+
+ switch(psHWRInfo->eHWRType)
+ {
+ case RGX_HWRTYPE_BIF0FAULT:
+ case RGX_HWRTYPE_BIF1FAULT:
+ {
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+ psHWRInfo->ui64CRTimer,
+ IMG_FALSE);
+ }
+ }
+ break;
+ case RGX_HWRTYPE_TEXASBIF0FAULT:
+ {
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+ psHWRInfo->ui64CRTimer,
+ IMG_FALSE);
+ }
+ }
+ }
+ break;
+ case RGX_HWRTYPE_DPXMMUFAULT:
+ {
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+ psHWRInfo->ui64CRTimer,
+ IMG_FALSE);
+ }
+ }
+ }
+ break;
+ case RGX_HWRTYPE_MMUFAULT:
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+ psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+ psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress,
+ psHWRInfo->ui64CRTimer,
+ IMG_FALSE,
+ IMG_FALSE);
+ }
+ }
+ break;
+
+ case RGX_HWRTYPE_MMUMETAFAULT:
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+ psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+ psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress,
+ psHWRInfo->ui64CRTimer,
+ IMG_TRUE,
+ IMG_FALSE);
+ }
+ }
+ break;
+
+
+ case RGX_HWRTYPE_POLLFAILURE:
+ {
+ PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X)",
+ psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+ ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask);
+ }
+ break;
+
+ case RGX_HWRTYPE_OVERRUN:
+ case RGX_HWRTYPE_UNKNOWNFAILURE:
+ {
+ /* Nothing to dump */
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(IMG_FALSE);
+ }
+ break;
+ }
+ }
+
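+ /* Step to the next HWR record: the first RGXFWIF_HWINFO_MAX_FIRST entries
+ are stored only once; after the last of those, reading continues from the
+ firmware write index and wraps within the circular RGXFWIF_HWINFO_MAX_LAST
+ portion of the buffer. */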
+ if(ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+ ui32ReadIndex = psHWInfoBuf->ui32WriteIndex;
+ else
+ ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+ }
+ }
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function _CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo - RGX device info
+
+ @Return IMG_BOOL - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32BIFMMUEntry;
+
+ ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+ if(ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo - RGX device info
+ @Output psDevVAddr - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase - The page catalog base
+ @Output pui32DataType - The MMU entry data type
+
+ @Return void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 *pui32CatBase,
+ IMG_UINT32 *pui32DataType)
+{
+ IMG_UINT64 ui64BIFMMUEntryStatus;
+
+ ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
+ psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+ *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+ RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+ *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+ RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary of the RGX state in human readable form.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input bRGXPoweredON - IMG_TRUE if RGX device is on
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_BOOL bRGXPoweredON)
+{
+ IMG_CHAR *pszState, *pszReason;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 ui32OSid;
+
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+ if (bRGXPoweredON)
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+
+ IMG_UINT64 ui64RegValMMUStatus;
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, 0, 0, IMG_FALSE, IMG_TRUE);
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, 0, 0, IMG_TRUE, IMG_TRUE);
+ }
+ else
+ {
+ IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus;
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SINGLE_BIF_BIT_MASK))
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+ IMG_UINT32 ui32PhantomCnt = RGX_GET_NUM_PHANTOMS(psDevInfo->sDevFeatureCfg.ui32NumClusters);
+
+ if(ui32PhantomCnt > 1)
+ {
+ IMG_UINT32 ui32Phantom;
+ for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++)
+ {
+ /* This can't be done as it may interfere with the FW... */
+ /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+ }
+ }
+ else
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+ }
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_REQ_STATUS);
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+ }
+
+ }
+
+ if(_CheckForPendingPage(psDevInfo))
+ {
+ IMG_UINT32 ui32CatBase;
+ IMG_UINT32 ui32DataType;
+ IMG_DEV_VIRTADDR sDevVAddr;
+
+ PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+ _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+ if(ui32CatBase >= 8)
+ {
+ PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+ }
+ else
+ {
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+ PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+ " on cat base %u. PC Addr = 0x%llX",
+ (unsigned long long) sDevVAddr.uiAddr,
+ ui32CatBase,
+ (unsigned long long) sPCDevPAddr.uiAddr);
+ RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ /* Firmware state */
+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break;
+ default: pszState = "UNKNOWN"; break;
+ }
+
+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+ {
+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - FW Assert"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failure"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break;
+ default: pszReason = " - Unknown reason"; break;
+ }
+
+ if (psRGXFWIfTraceBuf == NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+
+ /* can't dump any more information */
+ return;
+ }
+
+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x)", pszState, pszReason, psRGXFWIfTraceBuf->ui32HWRStateFlags);
+ PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d other, %d total)",
+ pszPowStateName[psRGXFWIfTraceBuf->ePowState],
+ (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+ psDevInfo->ui32ActivePMReqOk,
+ psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqTotal - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqTotal);
+
+ for (ui32OSid = 0; ui32OSid < RGXFW_NUM_OS; ui32OSid++)
+ {
+ IMG_UINT32 ui32OSStateFlags = psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
+
+ PVR_DUMPDEBUG_LOG("RGX FW OS %u State: 0x%08x (Active: %s%s, Freelists: %s)", ui32OSid, ui32OSStateFlags,
+ ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) != 0)?"Yes":"No",
+ ((ui32OSStateFlags & RGXFW_OS_STATE_OFFLOADING) != 0)?"- offloading":"",
+ ((ui32OSStateFlags & RGXFW_OS_STATE_FREELIST_OK) != 0)?"Ok":"Not Ok"
+ );
+ }
+ _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+
+ _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+
+ _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf, psDevInfo);
+}
+
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+ X(RGX_CR_META_SP_MSLVCTRL0) \
+ X(RGX_CR_META_SP_MSLVCTRL1) \
+ X(RGX_CR_META_SP_MSLVDATAX) \
+ X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+ X(RGX_CR_META_SP_MSLVIRQENABLE) \
+ X(RGX_CR_META_SP_MSLVIRQLEVEL)
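+/* The X-macro list above is expanded twice below to build parallel arrays of
+ register addresses and register name strings. */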
+
+ IMG_UINT32 ui32Idx, ui32RegIdx;
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32RegAddr;
+
+ const IMG_UINT32 aui32DebugRegAddr [] = {
+#define X(A) A,
+ RGX_META_SP_EXTRA_DEBUG
+#undef X
+ };
+
+ const IMG_CHAR* apszDebugRegName [] = {
+#define X(A) #A,
+ RGX_META_SP_EXTRA_DEBUG
+#undef X
+ };
+
+ const IMG_UINT32 aui32Debug2RegAddr [] = {0x0A28, 0x0A30, 0x0A38};
+
+ PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+ /* dump first set of Slave Port debug registers */
+ for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+ {
+ const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+ ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal);
+ }
+
+ /* dump second set of Slave Port debug registers */
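+ /* These are indexed debug registers: write the index value first, then read
+ the register back and print the result. */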
+ for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+ {
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+ PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+
+ }
+
+ for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+ {
+ ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+ for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+ {
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+ }
+ }
+
+}
+
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 i;
+
+ for(i=0;i<=DEBUG_REQUEST_VERBOSITY_MAX;i++)
+ {
+ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile,
+ psDevInfo, i);
+ }
+}
+
+/*
+ * Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+ RGXFW_LOG_SFids eSFId;
+ IMG_CHAR *pszName;
+ IMG_CHAR *pszFmt;
+ IMG_UINT32 ui32ArgNum;
+} TRACEBUF_LOG;
+
+static TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+ RGXFW_LOG_SFIDLIST
+#undef X
+};
+
+#define NARGS_MASK (~(0xF<<16))
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ TRACEBUF_LOG *psLogDef = &aLogDefinitions[0];
+ IMG_BOOL bIntegrityOk = IMG_TRUE;
+
+ /*
+ * For every log ID, check that the format string and number of arguments are valid.
+ */
+ while (psLogDef->eSFId != RGXFW_SF_LAST)
+ {
+ IMG_UINT32 ui32Count;
+ IMG_CHAR *pszString;
+ TRACEBUF_LOG *psLogDef2;
+
+ /*
+ * Check the number of arguments matches the number of '%' in the string and
+ * check that no string uses %s which is not supported as it requires a
+ * pointer to memory that is not going to be valid.
+ */
+ pszString = psLogDef->pszFmt;
+ ui32Count = 0;
+
+ while (*pszString != '\0')
+ {
+ if (*pszString++ == '%')
+ {
+ ui32Count++;
+ if (*pszString == 's')
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.",
+ psLogDef->pszName, *pszString);
+ }
+ else if (*pszString == '%')
+ {
+ /* Double % is a printable % sign and not a format string... */
+ ui32Count--;
+ }
+ }
+ }
+
+ if (ui32Count != psLogDef->ui32ArgNum)
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.",
+ psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+ }
+
+ /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+ if (ui32Count > 20)
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+ psLogDef->pszName, ui32Count);
+ }
+
+ /* Check the id number is unique (don't take into account the number of arguments) */
+ ui32Count = 0;
+ psLogDef2 = &aLogDefinitions[0];
+
+ while (psLogDef2->eSFId != RGXFW_SF_LAST)
+ {
+ if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+ {
+ ui32Count++;
+ }
+ psLogDef2++;
+ }
+
+ if (ui32Count != 1)
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+ psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+ }
+
+ /* Move to the next log ID... */
+ psLogDef++;
+ }
+
+ return bIntegrityOk;
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE;
+
+ /* Check that the firmware trace is correctly defined... */
+ if (!bIntegrityCheckPassed)
+ {
+ bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+ if (!bIntegrityCheckPassed)
+ {
+ return;
+ }
+ }
+
+ /* Dump FW trace information... */
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_UINT32 tid;
+
+ /* Print the log type settings... */
+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: none");
+ }
+
+ /* Print the decoded log for each thread... */
+ for (tid = 0; tid < RGXFW_THREAD_NUM; tid++)
+ {
+ IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+ IMG_UINT32 ui32TracePtr = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer;
+ IMG_UINT32 ui32Count = 0;
+
+ if (pui32TraceBuf == NULL)
+ {
+ /* trace buffer not yet allocated */
+ continue;
+ }
+
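+ /* Each trace entry consists of one SF id word, a 64-bit timestamp
+ (two words), and RGXFW_SF_PARAMNUM(id) argument words. */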
+ while (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+ {
+ IMG_UINT32 ui32Data, ui32DataToId;
+
+ /* Find the first valid log ID, skipping any padding or unrecognized words... */
+ do
+ {
+ ui32Data = pui32TraceBuf[ui32TracePtr];
+ ui32DataToId = idToStringID(ui32Data);
+
+ /* If the id is not in the host string table but is still a valid firmware id, the table needs updating. */
+ if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data))
+ {
+ PVR_DUMPDEBUG_LOG("ERROR: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+ return;
+ }
+
+ /* Update the trace pointer... */
+ ui32TracePtr = (ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE;
+ ui32Count++;
+ } while ((RGXFW_SF_LAST == ui32DataToId || ui32DataToId >= RGXFW_SF_FIRST) &&
+ ui32Count < RGXFW_TRACE_BUFFER_SIZE);
+
+ if (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+ {
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%llu:T%u-%s> ";
+ IMG_UINT64 ui64Timestamp;
+ IMG_UINT uiLen;
+
+ /* If we hit the ASSERT message then this is the end of the log... */
+ if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+ {
+ PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+ break;
+ }
+
+ /*
+ * Print the trace string and provide up to 20 arguments which
+ * printf function will be able to use. We have already checked
+ * that no string uses more than this.
+ */
+ OSStringCopy(&szBuffer[OSStringLength(szBuffer)], SFs[ui32DataToId].name);
+ uiLen = OSStringLength(szBuffer);
+ szBuffer[uiLen ? uiLen - 1 : 0] = '\0';
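+ /* The two words following the log ID hold the 64-bit timestamp,
+ high word first. */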
+ ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % RGXFW_TRACE_BUFFER_SIZE]) << 32 |
+ (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE]);
+ PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+ pui32TraceBuf[(ui32TracePtr + 2) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 3) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 4) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 5) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 6) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 7) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 8) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 9) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 10) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 11) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 12) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 13) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 14) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 15) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 16) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 17) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 18) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 19) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 20) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 21) % RGXFW_TRACE_BUFFER_SIZE]);
+
+ /* Update the trace pointer... */
+ ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % RGXFW_TRACE_BUFFER_SIZE;
+ ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+ }
+ }
+ }
+ }
+}
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+ switch (eDevState)
+ {
+ case PVRSRV_DEVICE_STATE_INIT:
+ return "Initialising";
+ case PVRSRV_DEVICE_STATE_ACTIVE:
+ return "Active";
+ case PVRSRV_DEVICE_STATE_DEINIT:
+ return "De-initialising";
+ case PVRSRV_DEVICE_STATE_UNDEFINED:
+ PVR_ASSERT(!"Device has undefined state");
+ default:
+ return "Unknown";
+ }
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+ switch(ePowerState)
+ {
+ case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+ case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+ case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+ default: return "UNKNOWN";
+ }
+}
+
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32VerbLevel)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess : failed to acquire lock, error:0x%x", eError));
+ return;
+ }
+
+ switch (ui32VerbLevel)
+ {
+ case DEBUG_REQUEST_VERBOSITY_LOW :
+ {
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ IMG_BOOL bRGXPoweredON;
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+ goto Exit;
+ }
+
+ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+ if(psPVRSRVData->sDriverInfo.bIsNoMatch)
+ {
+ PVR_DUMPDEBUG_LOG("------[ Driver Info ]------");
+ PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo);
+ PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo);
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX summary ]------");
+ PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d", psDevInfo->sDevFeatureCfg.ui32B, \
+ psDevInfo->sDevFeatureCfg.ui32V, \
+ psDevInfo->sDevFeatureCfg.ui32N, \
+ psDevInfo->sDevFeatureCfg.ui32C);
+ PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+ PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+
+ _RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+ if (bRGXPoweredON)
+ {
+
+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM);
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+ }
+
+ eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDbgCommands, RGX_MAX_DEBUG_COMMANDS, PDUMP_FLAGS_CONTINUOUS, pfnDumpDebugPrintf, pvDumpDebugFile);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: RGXRunScript failed (%d)", eError));
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Dump Slave Port debug information"));
+ _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+ }
+ }
+#if !defined(NO_HARDWARE)
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ RGX_MIPS_STATE sMIPSState;
+ PVRSRV_ERROR eError;
+ OSCachedMemSet((void *)&sMIPSState, 0x00, sizeof(RGX_MIPS_STATE));
+ eError = _RGXMipsExtraDebug(psDevInfo, psDeviceNode->psDevConfig, &sMIPSState);
+ PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DUMPDEBUG_LOG("MIPS extra debug not available");
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("PC :0x%08X", sMIPSState.ui32ErrorEPC);
+ PVR_DUMPDEBUG_LOG("STATUS_REGISTER :0x%08X", sMIPSState.ui32StatusRegister);
+ PVR_DUMPDEBUG_LOG("CAUSE_REGISTER :0x%08X", sMIPSState.ui32CauseRegister);
+ _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, sMIPSState.ui32CauseRegister);
+ PVR_DUMPDEBUG_LOG("BAD_REGISTER :0x%08X", sMIPSState.ui32BadRegister);
+ PVR_DUMPDEBUG_LOG("EPC :0x%08X", sMIPSState.ui32EPC);
+ PVR_DUMPDEBUG_LOG("SP :0x%08X", sMIPSState.ui32SP);
+ PVR_DUMPDEBUG_LOG("BAD_INSTRUCTION :0x%08X", sMIPSState.ui32BadInstr);
+ PVR_DUMPDEBUG_LOG("DEBUG :");
+ _RGXMipsDumpDebugDecode(pfnDumpDebugPrintf, pvDumpDebugFile, sMIPSState.ui32Debug, sMIPSState.ui32DEPC);
+
+ {
+ IMG_UINT32 ui32Idx;
+
+ PVR_DUMPDEBUG_LOG("TLB :");
+ for (ui32Idx = 0;
+ ui32Idx < IMG_ARR_NUM_ELEMS(sMIPSState.asTLB);
+ ++ui32Idx)
+ {
+ _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, pvDumpDebugFile, &sMIPSState.asTLB[ui32Idx], ui32Idx);
+ }
+ }
+ }
+ PVR_DUMPDEBUG_LOG("--------------------------------");
+ }
+#endif
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" (!) RGX power is down. No registers dumped");
+ }
+
+ /* Dump out the kernel CCB. */
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+ if (psKCCBCtl != NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+ psKCCBCtl->ui32WriteOffset,
+ psKCCBCtl->ui32ReadOffset);
+ }
+ }
+
+ /* Dump out the firmware CCB. */
+ {
+ RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+ if (psFCCBCtl != NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+ psFCCBCtl->ui32WriteOffset,
+ psFCCBCtl->ui32ReadOffset);
+ }
+ }
+
+ /* Dump the KCCB commands executed */
+ {
+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+ psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted);
+ }
+
+ /* Dump the IRQ info for threads*/
+ {
+ IMG_UINT32 ui32TID;
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u",
+ ui32TID,
+ psDevInfo->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID],
+ psDevInfo->aui32SampleIRQCount[ui32TID]);
+ }
+ }
+
+ /* Dump the FW config flags */
+ {
+ RGXFWIF_INIT *psRGXFWInit;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ goto Exit;
+ }
+
+ PVR_DUMPDEBUG_LOG("RGX FW config flags = 0x%X", psRGXFWInit->ui32ConfigFlags);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ }
+
+ break;
+
+ }
+ case DEBUG_REQUEST_VERBOSITY_MEDIUM :
+ {
+ IMG_INT tid;
+ /* Dump FW trace information */
+ if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ for ( tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+ {
+ IMG_UINT32 i;
+ IMG_BOOL bPrevLineWasZero = IMG_FALSE;
+ IMG_BOOL bLineIsAllZeros = IMG_FALSE;
+ IMG_UINT32 ui32CountLines = 0;
+ IMG_UINT32 *pui32TraceBuffer;
+ IMG_CHAR *pszLine;
+
+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: none");
+ }
+
+ pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+ /* Skip if trace buffer is not allocated */
+ if (pui32TraceBuffer == NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid);
+ continue;
+ }
+
+ /* each element in the line is 8 characters plus a space. The '+1' is because of the final trailing '\0'. */
+ pszLine = OSAllocMem(9*RGXFW_TRACE_BUFFER_LINESIZE+1);
+ if (pszLine == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Out of mem allocating line string (size: %d)", 9*RGXFW_TRACE_BUFFER_LINESIZE));
+ goto Exit;
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+ PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+ PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", RGXFW_TRACE_BUFFER_SIZE);
+
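+ /* Dump the raw trace buffer RGXFW_TRACE_BUFFER_LINESIZE words per line,
+ collapsing consecutive lines that are all zero. */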
+ for (i = 0; i < RGXFW_TRACE_BUFFER_SIZE; i += RGXFW_TRACE_BUFFER_LINESIZE)
+ {
+ IMG_UINT32 k = 0;
+ IMG_UINT32 ui32Line = 0x0;
+ IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+ IMG_CHAR *pszBuf = pszLine;
+
+ for (k = 0; k < RGXFW_TRACE_BUFFER_LINESIZE; k++)
+ {
+ ui32Line |= pui32TraceBuffer[i + k];
+
+ /* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+ pszBuf += 9; /* write over the '\0' */
+ }
+
+ bLineIsAllZeros = (ui32Line == 0x0);
+
+ if (bLineIsAllZeros)
+ {
+ if (bPrevLineWasZero)
+ {
+ ui32CountLines++;
+ }
+ else
+ {
+ bPrevLineWasZero = IMG_TRUE;
+ ui32CountLines = 1;
+ PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+ }
+ }
+ else
+ {
+ if (bPrevLineWasZero && ui32CountLines > 1)
+ {
+ PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+ }
+ bPrevLineWasZero = IMG_FALSE;
+
+ PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+ }
+
+ }
+ if (bPrevLineWasZero)
+ {
+ PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+ OSFreeMem(pszLine);
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ RGXFWIF_INIT *psRGXFWInit;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDebugRequestProcess: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ goto Exit;
+ }
+
+ if ((psRGXFWInit->ui32ConfigFlags & RGXFWIF_INICFG_METAT1_DUMMY) != 0)
+ {
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 *pui32T1PCX = &psRGXFWIfTraceBufCtl->ui32T1PCX[0];
+ IMG_UINT32 ui32T1PCXWOff = psRGXFWIfTraceBufCtl->ui32T1PCXWOff;
+ IMG_UINT32 i = ui32T1PCXWOff;
+
+ PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list (most recent first) ]------");
+ do
+ {
+ PVR_DUMPDEBUG_LOG(" 0x%08x", pui32T1PCX[i]);
+ i = (i == 0) ? (RGXFWIF_MAX_PCX - 1) : (i - 1);
+
+ } while (i != ui32T1PCXWOff);
+
+ PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list [END] ]------");
+ }
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ }
+ }
+
+ {
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+ PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+#else
+ PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+#endif
+ CheckForStalledTransferCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ CheckForStalledRenderCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ CheckForStalledComputeCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ CheckForStalledRayCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+ break;
+ }
+ case DEBUG_REQUEST_VERBOSITY_HIGH:
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ IMG_BOOL bRGXPoweredON;
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+ return;
+ }
+
+ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+ PVR_DUMPDEBUG_LOG("------[ Debug summary ]------");
+
+ _RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+ }
+ default:
+ break;
+ }
+
+Exit:
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+#endif
+
+/*
+ RGXPanic
+*/
+void RGXPanic(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVR_LOG(("RGX panic"));
+ PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+ NULL, NULL);
+ OSPanic();
+}
+
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX debug header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX debugging functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEBUG_H__)
+#define __RGXDEBUG_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+
+/**
+ * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in
+ * LISR for each RGX FW thread.
+ * Macro takes pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \
+ do \
+ { \
+ IMG_UINT32 ui32TID; \
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \
+ { \
+ PVR_DPF((DBGPRIV_VERBOSE, \
+				"RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", \
+ ui32TID, \
+ (psRgxDevInfo)->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID], \
+ (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \
+ } \
+ } while(0)
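+/*
+ * Typical usage (illustrative only; psDevInfo stands for whatever
+ * PVRSRV_RGXDEV_INFO pointer is in scope at the call site, e.g. in the LISR):
+ *
+ *     RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+ */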
+
+/*!
+*******************************************************************************
+
+ @Function RGXPanic
+
+ @Description
+
+ Called when an unrecoverable situation is detected. Dumps RGX debug
+ information and tells the OS to panic.
+
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXPanic(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpDebugInfo
+
+ @Description
+
+ Dumps useful debugging info. Dumps less information than PVRSRVDebugRequest
+ and does not dump debugging information for all requester types (e.g. SysDebug,
+ ServerSync info).
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugRequestProcess
+
+ @Description
+
+ This function prints out the debug information for the specified level of
+ verbosity.
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input ui32VerbLevel - Verbosity level
+
+ @Return void
+
+******************************************************************************/
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32VerbLevel);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadWithSP
+
+ @Description
+
+ Reads data from a memory location (FW memory map) using the META Slave Port
+
+ @Input ui32FWAddr - 32 bit FW address
+
+ @Return IMG_UINT32
+******************************************************************************/
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr);
+
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function ValidateFWImageWithSP
+
+ @Description Compare the Firmware image as seen from the CPU point of view
+ against the same memory area as seen from the META point of view
+
+ @Input psDevInfo - Device Info
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+#endif /* __RGXDEBUG_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX device node header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX device node
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEVICE_H__)
+#define __RGXDEVICE_H__
+
+#include "img_types.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif.h"
+#include "rgxscript.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_km_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_DEV_VIRTADDR *psMCUFenceAddr;
+ IMG_DEV_VIRTADDR *psResumeSignalAddr;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1 << 0) /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_FTRACE_EN (0x1 << 1) /*!< Used to enable device FTrace thread to consume HWPerf data */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x1 << 2) /*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN (0x1 << 3) /*!< Used for validation to inject dust requests every TA/3D kick */
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE 100 /* DVFS Table size */
+#define RGX_GPU_DVFS_GET_INDEX(clockfreq) ((clockfreq) / 10000000) /* Assuming different GPU clocks are separated by at least 10MHz
+ * WARNING: this macro must be used only with nominal values of
+ * the GPU clock speed (the ones provided by the customer code) */
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+ IMG_UINT64 ui64CalibrationCRTimestamp; /*!< CR timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+ IMG_UINT64 ui64CalibrationOSTimestamp; /*!< OS timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+ IMG_UINT64 ui64CalibrationCRTimediff; /*!< CR timediff used to calibrate GPU frequencies (calibration period) */
+ IMG_UINT64 ui64CalibrationOSTimediff; /*!< OS timediff used to calibrate GPU frequencies (calibration period) */
+ IMG_UINT32 ui32CalibrationPeriod; /*!< Threshold used to determine whether the current GPU frequency should be calibrated */
+ IMG_UINT32 ui32CurrentDVFSId; /*!< Current table entry index */
+ IMG_BOOL bAccumulatePeriod; /*!< Accumulate many consecutive periods to get a better calibration at the end */
+ IMG_UINT32 aui32DVFSClock[RGX_GPU_DVFS_TABLE_SIZE]; /*!< DVFS clocks table (clocks in Hz) */
+} RGX_GPU_DVFS_TABLE;
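+/*
+ * Illustrative use of the macros and table above (not part of the driver as
+ * supplied; psTable, ui32NominalClkHz and ui32CalibratedClkHz are
+ * hypothetical names):
+ *
+ *     IMG_UINT32 ui32Idx = RGX_GPU_DVFS_GET_INDEX(ui32NominalClkHz);
+ *     psTable->aui32DVFSClock[ui32Idx] = ui32CalibratedClkHz;
+ *
+ * e.g. nominal clocks of 400MHz and 600MHz map to indices 40 and 60. The
+ * indices stay unique as long as nominal clocks are separated by at least
+ * 10MHz, which is the assumption stated for RGX_GPU_DVFS_GET_INDEX.
+ */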
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+ IMG_BOOL bValid; /* If TRUE, statistics are valid.
+ FALSE if the driver couldn't get reliable stats. */
+ IMG_UINT64 ui64GpuStatActiveHigh; /* GPU active high statistic */
+ IMG_UINT64 ui64GpuStatActiveLow; /* GPU active low (i.e. TLA active only) statistic */
+ IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */
+ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */
+ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+ IMG_BOOL bEnabled;
+ RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush;
+ IMG_UINT32 ui32NumRegRecords;
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+typedef struct
+{
+ IMG_UINT32 ui32DustCount1;
+ IMG_UINT32 ui32DustCount2;
+ IMG_BOOL bToggle;
+} RGX_DUST_STATE;
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+ IMG_UINT64 ui64ErnsBrns;
+ IMG_UINT64 ui64Features;
+ IMG_UINT32 ui32B;
+ IMG_UINT32 ui32V;
+ IMG_UINT32 ui32N;
+ IMG_UINT32 ui32C;
+ IMG_UINT32 ui32NumClusters;
+ IMG_UINT32 ui32CtrlStreamFormat;
+ IMG_UINT32 ui32FBCDCArch;
+ IMG_UINT32 ui32META;
+ IMG_UINT32 ui32MCMB;
+ IMG_UINT32 ui32MCMS;
+ IMG_UINT32 ui32MDMACount;
+ IMG_UINT32 ui32NIIP;
+ IMG_UINT32 ui32PBW;
+ IMG_UINT32 ui32STEArch;
+ IMG_UINT32 ui32SVCE;
+ IMG_UINT32 ui32SLCBanks;
+ IMG_UINT32 ui32CacheLineSize;
+ IMG_UINT32 ui32SLCSize;
+ IMG_UINT32 ui32VASB;
+ IMG_UINT32 ui32MAXDMCount;
+ IMG_UINT32 ui32MAXDMMTSCount;
+ IMG_UINT32 ui32MAXDustCount;
+#define MAX_BVNC_STRING_LEN (50)
+ IMG_PCHAR pszBVNCString;
+}PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/* For the workload estimation return data array */
+/* The max amount of commands the MTS can have is 255, therefore 512 (LOG2 = 9)
+ * is large enough to account for all corner cases.
+ */
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
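+/*
+ * Illustrative indexing sketch (not taken from the driver): because
+ * RETURN_DATA_ARRAY_SIZE is a power of two, a monotonically increasing write
+ * offset can be wrapped with the mask rather than a modulo. ui32WriteOffset
+ * and sNewData are hypothetical names.
+ *
+ *     asReturnData[ui32WriteOffset & RETURN_DATA_ARRAY_WRAP_MASK] = sNewData;
+ *     ui32WriteOffset++;
+ */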
+
+#define WORKLOAD_HASH_SIZE 64
+
+typedef struct _WORKEST_HOST_DATA_ WORKEST_HOST_DATA;
+
+typedef struct _RGX_WORKLOAD_TA3D_
+{
+ IMG_UINT32 ui32RenderTargetSize;
+ IMG_UINT32 ui32NumberOfDrawCalls;
+ IMG_UINT32 ui32NumberOfIndices;
+ IMG_UINT32 ui32NumberOfMRTs;
+} RGX_WORKLOAD_TA3D;
+
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+ HASH_TABLE *psWorkloadDataHash;
+ RGX_WORKLOAD_TA3D asWorkloadHashKeys[WORKLOAD_HASH_SIZE];
+ IMG_UINT64 aui64HashCycleData[WORKLOAD_HASH_SIZE];
+ IMG_UINT32 ui32HashArrayWO;
+ POS_LOCK psWorkEstHashLock;
+} WORKLOAD_MATCHING_DATA;
+
+struct _WORKEST_HOST_DATA_{
+ WORKLOAD_MATCHING_DATA sWorkloadMatchingDataTA;
+ WORKLOAD_MATCHING_DATA sWorkloadMatchingData3D;
+ IMG_UINT32 ui32WorkEstCCBReceived;
+};
+typedef struct _WORKEST_RETURN_DATA_ {
+ WORKEST_HOST_DATA *psWorkEstHostData;
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData;
+ RGX_WORKLOAD_TA3D sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+typedef struct
+{
+#if defined(PDUMP)
+ IMG_HANDLE hPdumpPages;
+#endif
+ PG_HANDLE sPages;
+ IMG_DEV_PHYADDR sPhysAddr;
+} RGX_MIPS_ADDRESS_TRAMPOLINE;
+
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg;
+
+ /* FIXME: This is a workaround due to having 2 inits but only 1 deinit */
+ IMG_BOOL bDevInit2Done;
+
+ IMG_BOOL bFirmwareInitialised;
+ IMG_BOOL bPDPEnabled;
+
+ IMG_HANDLE hDbgReqNotify;
+
+ /* Kernel mode linear address of device registers */
+ void *pvRegsBaseKM;
+
+ /* FIXME: The alloc for this should go through OSAllocMem in future */
+ IMG_HANDLE hRegMapping;
+
+ /* System physical address of device registers*/
+ IMG_CPU_PHYADDR sRegsPhysBase;
+ /* Register region size in bytes */
+ IMG_UINT32 ui32RegSize;
+
+ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
+
+ /* Firmware memory context info */
+ DEVMEM_CONTEXT *psKernelDevmemCtx;
+ DEVMEM_HEAP *psFirmwareHeap;
+ MMU_CONTEXT *psKernelMMUCtx;
+
+ void *pvDeviceMemoryHeap;
+
+ /* Kernel CCB */
+ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */
+ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */
+ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */
+ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */
+
+ /* Firmware CCB */
+ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */
+ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */
+ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */
+ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */
+
+ /* Workload Estimation Firmware CCB */
+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */
+ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */
+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */
+ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+ IMG_BOOL bEnableFWPoisonOnFree; /*!< Enable poisoning of FW allocations when freed */
+ IMG_BYTE ubFWPoisonOnFreeValue; /*!< Byte value used when poisoning FW allocations */
+
+	/*
+	   If we don't preallocate the page tables, we must insert
+	   newly allocated page tables dynamically.
+	*/
+ void *pvMMUContextList;
+
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+ RGX_SCRIPTS *psScripts;
+
+ DEVMEM_MEMDESC *psRGXFWCodeMemDesc;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ DEVMEM_MEMDESC *psRGXFWDataMemDesc;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ RGX_MIPS_ADDRESS_TRAMPOLINE sTrampoline;
+
+ DEVMEM_MEMDESC *psRGXFWCorememMemDesc;
+ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase;
+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+
+#if defined(RGXFW_ALIGNCHECKS)
+ DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc;
+#endif
+
+ DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc;
+ IMG_UINT32 ui32SigTAChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc;
+ IMG_UINT32 ui32Sig3DChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSigRTChecksMemDesc;
+ IMG_UINT32 ui32SigRTChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSigSHChecksMemDesc;
+ IMG_UINT32 ui32SigSHChecksSize;
+
+#if defined (PDUMP)
+ IMG_BOOL bDumpedKCCBCtlAlready;
+#endif
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ Guest drivers do not support these functionalities:
+ - H/W perf & device power management
+ - F/W initialization & management
+ - GPU dvfs, trace & utilization
+ */
+ DEVMEM_MEMDESC *psRGXFWIfInitMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+#else
+ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */
+ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf; /* structure containing trace control data and actual trace buffer */
+
+ DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc;
+ RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBuf;
+
+ DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc;
+ RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb;
+
+ DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc;
+ IMG_BYTE *psRGXFWIfHWPerfBuf;
+ IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+ DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc;
+
+ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc;
+
+ DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc;
+ DEVMEM_MEMDESC *psRGXFWIfInitMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+
+ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc;
+ RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg;
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Additional guest firmware memory context info */
+ DEVMEM_HEAP *psGuestFirmwareHeap[RGXFW_NUM_OS-1];
+ DEVMEM_MEMDESC *psGuestFirmwareMemDesc[RGXFW_NUM_OS-1];
+#endif
+ DEVMEM_MEMDESC *psMETAT1StackMemDesc;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Array to store data needed for workload estimation when a workload
+ * has finished and its cycle time is returned to the host.
+ */
+ WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE];
+ IMG_UINT32 ui32ReturnDataWO;
+#endif
+
+#if defined (SUPPORT_PDVFS)
+ /**
+ * Host memdesc and pointer to memory containing core clock rate in Hz.
+ * Firmware (PDVFS) updates the memory on changing the core clock rate over
+ * GPIO.
+ * Note: Shared memory needs atomic access from Host driver and firmware,
+ * hence size should not be greater than memory transaction granularity.
+	 * Currently it is chosen to be 32 bits.
+ */
+ DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc;
+ volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate;
+ /**
+ * Last sampled core clk rate.
+ */
+ volatile IMG_UINT32 ui32CoreClkRateSnapshot;
+#endif
+ /*
+ HWPerf data for the RGX device
+ */
+
+	POS_LOCK    hHWPerfLock;  /*! Critical section lock that protects the HWPerf code
+	                           * from duplicate init/deinit across multiple threads and
+	                           * from loss/freeing of FW & Host resources while they are
+	                           * in use by another thread, e.g. the MISR. */
+
+ IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+ IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */
+
+ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */
+ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */
+ IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */
+ IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */
+ IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events */
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ void *pvGpuFtraceData;
+#endif
+
+ /* Poll data for detecting firmware fatal errors */
+ IMG_UINT32 aui32CrLastPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32KCCBCmdsExecutedLastTime;
+ IMG_BOOL bKCCBCmdsWaitingLastTime;
+ IMG_UINT32 ui32GEOTimeoutsLastTime;
+
+ /* Client stall detection */
+ IMG_UINT32 ui32StalledClientMask;
+#endif
+
+ IMG_BOOL bWorkEstEnabled;
+ IMG_BOOL bPDVFSEnabled;
+
+ void *pvLISRData;
+ void *pvMISRData;
+ void *pvAPMISRData;
+ RGX_ACTIVEPM_CONF eActivePMConf;
+
+ volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+ DEVMEM_MEMDESC *psRGXFaultAddressMemDesc;
+
+ DEVMEM_MEMDESC *psRGXFWHWBRN37200MemDesc;
+
+ DEVMEM_MEMDESC *psSLC3FenceMemDesc;
+
+	/* If we do 10 deferred memory allocations per second, a 32-bit ID would wrap around
+	 * after roughly 2^32 / 10 seconds, i.e. about 13.6 years */
+ IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */
+ IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */
+ IMG_UINT32 ui32RPMFreelistCurrID; /*!< ID assigned to the next RPM freelist */
+
+ POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */
+ DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */
+ POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */
+ DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */
+ POS_LOCK hLockRPMFreeList; /*!< Lock to protect simultaneous access to RPM Freelists */
+ DLLIST_NODE sRPMFreeListHead; /*!< List of growable RPM Freelists */
+ POS_LOCK hLockRPMContext; /*!< Lock to protect simultaneous access to RPM contexts */
+ PSYNC_PRIM_CONTEXT hSyncPrimContext;
+ PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+ IMG_UINT32 ui32ActivePMReqOk;
+ IMG_UINT32 ui32ActivePMReqDenied;
+ IMG_UINT32 ui32ActivePMReqTotal;
+
+ IMG_HANDLE hProcessQueuesMISR;
+
+ IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */
+
+ /* Timer Queries */
+ IMG_UINT32 ui32ActiveQueryId; /*!< id of the active line */
+ IMG_BOOL bSaveStart; /*!< save the start time of the next kick on the device*/
+ IMG_BOOL bSaveEnd; /*!< save the end time of the next kick on the device*/
+
+ DEVMEM_MEMDESC *psStartTimeMemDesc; /*!< memdesc for Start Times */
+ IMG_UINT64 *pui64StartTimeById; /*!< CPU mapping of the above */
+
+	DEVMEM_MEMDESC    *psEndTimeMemDesc;      /*!< memdesc for End Times */
+ IMG_UINT64 *pui64EndTimeById; /*!< CPU mapping of the above */
+
+ IMG_UINT32 aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES]; /*!< kicks Scheduled on QueryId */
+ DEVMEM_MEMDESC *psCompletedMemDesc; /*!< kicks Completed on QueryId */
+ IMG_UINT32 *pui32CompletedById; /*!< CPU mapping of the above */
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* GPU DVFS Table */
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable;
+
+ /* Pointer to function returning the GPU utilisation statistics since the last
+ * time the function was called. Supports different users at the same time.
+ *
+ * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+ * in microseconds since the last time the function was called
+ * by a specific user (identified by hGpuUtilUser)
+ *
+ * Returns PVRSRV_OK in case the call completed without errors,
+ * some other value otherwise.
+ */
+ PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hGpuUtilUser,
+ RGXFWIF_GPU_UTIL_STATS *psReturnStats);
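+	/*
+	 * Illustrative call pattern for the callback above (hGpuUtilUser and
+	 * sStats are hypothetical local names):
+	 *
+	 *     eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode,
+	 *                                            hGpuUtilUser, &sStats);
+	 *     if ((eError == PVRSRV_OK) && sStats.bValid)
+	 *     {
+	 *         ... consume sStats.ui64GpuStatActiveHigh / Idle / Blocked ...
+	 *     }
+	 */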
+#endif
+
+ POS_LOCK hGPUUtilLock;
+
+ /* Register configuration */
+ RGX_REG_CONFIG sRegCongfig;
+
+ IMG_BOOL bRGXPowered;
+ DLLIST_NODE sMemoryContextList;
+
+ POSWR_LOCK hRenderCtxListLock;
+ POSWR_LOCK hComputeCtxListLock;
+ POSWR_LOCK hTransferCtxListLock;
+ POSWR_LOCK hTDMCtxListLock;
+ POSWR_LOCK hRaytraceCtxListLock;
+ POSWR_LOCK hMemoryCtxListLock;
+ POSWR_LOCK hKickSyncCtxListLock;
+
+ /* Linked list of deferred KCCB commands due to a full KCCB */
+ DLLIST_NODE sKCCBDeferredCommandsListHead;
+
+ /* Linked lists of contexts on this device */
+ DLLIST_NODE sRenderCtxtListHead;
+ DLLIST_NODE sComputeCtxtListHead;
+ DLLIST_NODE sTransferCtxtListHead;
+ DLLIST_NODE sTDMCtxtListHead;
+ DLLIST_NODE sRaytraceCtxtListHead;
+ DLLIST_NODE sKickSyncCtxtListHead;
+
+ DLLIST_NODE sCommonCtxtListHead;
+ IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */
+ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */
+#endif
+
+ POS_LOCK hNMILock; /*!< Lock to protect NMI operations */
+
+ RGX_DUST_STATE sDustReqState;
+
+ RGX_POWER_LAYER_PARAMS sPowerParams;
+
+ RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */
+ IMG_BOOL bBPSet; /*!< A Breakpoint has been set */
+
+ IMG_UINT32 ui32CoherencyTestsDone;
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+ /*! GPU default voltage */
+ IMG_UINT32 ui32CoreVoltage;
+
+ /*! GPU default core clock speed in Hz */
+ IMG_UINT32 ui32CoreClockSpeed;
+
+ /*! Active Power Management: GPU actively requests the host driver to be powered off */
+ IMG_BOOL bEnableActivePM;
+
+ /*! Enable the GPU to power off internal Power Islands independently from the host driver */
+ IMG_BOOL bEnableRDPowIsland;
+
+ /*! Active Power Management: Delay between the GPU idle and the request to the host */
+ IMG_UINT32 ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+ /*! Timing information */
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ IMG_BOOL bHasTDFWCodePhysHeap;
+ IMG_UINT32 uiTDFWCodePhysHeapID;
+ IMG_BOOL bHasTDSecureBufPhysHeap;
+ IMG_UINT32 uiTDSecureBufPhysHeapID;
+} RGX_DATA;
+
+
+/*
+ RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME "RGXREG"
+
+#endif /* __RGXDEVICE_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxfw_log_helper.h
+@Title Firmware TBI logging helper function
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform Generic
+@Description This file contains some helper code to make TBI logging possible.
+ Specifically, it uses the SFIDLIST xmacro to trace ids back to
+ the original strings.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _RGXFW_LOG_HELPER_H_
+#define _RGXFW_LOG_HELPER_H_
+
+#include "rgx_fwif_sf.h"
+
+static IMG_CHAR *const groups[]= {
+#define X(A,B) #B,
+ RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+typedef struct {
+ IMG_UINT32 id;
+ IMG_CHAR *name;
+} tuple; /* pair of string format id and string formats */
+
+/* The tuple pairs that will be generated using XMacros will be stored here.
+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h */
+static const tuple SFs[]= {
+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e) , d },
+ RGXFW_LOG_SFIDLIST
+#undef X
+};
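+/*
+ * Illustrative expansion (the real entries live in RGXFW_LOG_SFIDLIST in
+ * rgx_fwif_sf.h; the values below are made up). An entry such as
+ *
+ *     X(0, RGXFW_GROUP_MAIN, 1, "Example format string", 0)
+ *
+ * expands to one initialiser of the form
+ *
+ *     { RGXFW_LOG_CREATESFID(0, RGXFW_GROUP_MAIN, 0), "Example format string" },
+ */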
+
+/* idToStringID : Search the SFs tuples {id, string} for a matching id.
+ * Returns the array index if found, or RGXFW_SF_LAST if none is found.
+ * A bsearch could be used instead, as the ids are in increasing order. */
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData)
+{
+ IMG_UINT32 i = 0 ;
+ for ( i = 0 ; SFs[i].id != RGXFW_SF_LAST ; i++)
+ {
+ if ( ui32CheckData == SFs[i].id )
+ {
+ return i;
+ }
+ }
+ /* Nothing found, return max value */
+ return RGXFW_SF_LAST;
+}
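+/*
+ * The binary search mentioned above could look roughly like the sketch below
+ * (illustrative only; it relies on SFs[] being sorted by id in increasing
+ * order and terminated by the RGXFW_SF_LAST entry):
+ *
+ *     IMG_UINT32 ui32Lo = 0, ui32Hi = (sizeof(SFs) / sizeof(SFs[0])) - 1;
+ *
+ *     while (ui32Lo < ui32Hi)
+ *     {
+ *         IMG_UINT32 ui32Mid = ui32Lo + ((ui32Hi - ui32Lo) / 2);
+ *
+ *         if (SFs[ui32Mid].id < ui32CheckData)
+ *             ui32Lo = ui32Mid + 1;
+ *         else
+ *             ui32Hi = ui32Mid;
+ *     }
+ *
+ *     return (SFs[ui32Lo].id == ui32CheckData) ? ui32Lo : RGXFW_SF_LAST;
+ */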
+
+#endif /* _RGXFW_LOG_HELPER_H_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services Firmware image utilities used at init time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services Firmware image utilities used at init time
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxfwimageutils.h"
+
+
+/************************************************************************
+* FW Segments configuration
+************************************************************************/
+typedef struct _RGX_FW_SEGMENT_
+{
+ IMG_UINT32 ui32SegId; /*!< Segment Id */
+ IMG_UINT32 ui32SegStartAddr; /*!< Segment Start Addr */
+ IMG_UINT32 ui32SegAllocSize; /*!< Amount of memory to allocate for that segment */
+ IMG_UINT32 ui32FWMemOffset; /*!< Offset of this segment in the collated FW mem allocation */
+ const IMG_CHAR *pszSegName;
+} RGX_FW_SEGMENT;
+
+typedef struct _RGX_FW_SEGMENT_LIST_
+{
+ RGX_FW_SEGMENT *psRGXFWCodeSeg;
+ RGX_FW_SEGMENT *psRGXFWDataSeg;
+ IMG_UINT32 ui32CodeSegCount;
+ IMG_UINT32 ui32DataSegCount;
+} RGX_FW_SEGMENT_LIST;
+
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+static RGX_FW_SEGMENT asRGXMetaFWCodeSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{RGXFW_SEGMMU_TEXT_ID, RGXFW_BOOTLDR_META_ADDR, 0x31000, 0, "Bootldr and Code"}, /* Has to be the first one to get the proper DevV addr */
+};
+static RGX_FW_SEGMENT asRGXMetaFWDataSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{RGXFW_SEGMMU_DATA_ID, 0x38880000, 0x17000, 0, "Local Shared and Data"},
+};
+#define RGXFW_META_NUM_CODE_SEGMENTS (sizeof(asRGXMetaFWCodeSegments)/sizeof(asRGXMetaFWCodeSegments[0]))
+#define RGXFW_META_NUM_DATA_SEGMENTS (sizeof(asRGXMetaFWDataSegments)/sizeof(asRGXMetaFWDataSegments[0]))
+#endif
+
+#if defined(RGX_FEATURE_MIPS) || defined(SUPPORT_KERNEL_SRVINIT)
+static RGX_FW_SEGMENT asRGXMipsFWCodeSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{ 0, RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE, RGXMIPSFW_BOOT_NMI_CODE_SIZE, RGXMIPSFW_BOOT_NMI_CODE_OFFSET, "Bootldr and NMI code"},
+{ 1, RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE, RGXMIPSFW_EXCEPTIONSVECTORS_SIZE, RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET, "Exception vectors"},
+{ 2, RGXMIPSFW_CODE_VIRTUAL_BASE, RGXMIPSFW_CODE_SIZE, RGXMIPSFW_CODE_OFFSET, "Text"},
+};
+static RGX_FW_SEGMENT asRGXMipsFWDataSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{ 3, RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE, RGXMIPSFW_BOOT_NMI_DATA_SIZE, RGXMIPSFW_BOOT_NMI_DATA_OFFSET, "Bootldr and NMI data"},
+{ 4, RGXMIPSFW_DATA_VIRTUAL_BASE, RGXMIPSFW_DATA_SIZE, RGXMIPSFW_DATA_OFFSET, "Local Data"},
+{ 5, RGXMIPSFW_STACK_VIRTUAL_BASE, RGXMIPSFW_STACK_SIZE, RGXMIPSFW_DATA_SIZE, "Stack"},
+};
+
+#define RGXFW_MIPS_NUM_CODE_SEGMENTS (sizeof(asRGXMipsFWCodeSegments)/sizeof(asRGXMipsFWCodeSegments[0]))
+#define RGXFW_MIPS_NUM_DATA_SEGMENTS (sizeof(asRGXMipsFWDataSegments)/sizeof(asRGXMipsFWDataSegments[0]))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function FindMMUSegment
+
+ @Description Given a 32 bit FW address attempt to find the corresponding
+ pointer to FW allocation
+
+ @Input ui32OffsetIn : 32 bit FW address
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+ @Output uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn
+ @Input psRGXFWSegList : List of FW code and data segments to search
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+ void *pvHostFWCodeAddr,
+ void *pvHostFWDataAddr,
+ void **uiHostAddrOut,
+ RGX_FW_SEGMENT_LIST *psRGXFWSegList)
+{
+ RGX_FW_SEGMENT *psSegArr;
+ IMG_UINT32 i;
+
+ psSegArr = psRGXFWSegList->psRGXFWCodeSeg;
+ for (i = 0; i < psRGXFWSegList->ui32CodeSegCount; i++)
+ {
+ if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+ (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+ {
+ *uiHostAddrOut = pvHostFWCodeAddr;
+ goto found;
+ }
+ }
+
+ psSegArr = psRGXFWSegList->psRGXFWDataSeg;
+ for (i = 0; i < psRGXFWSegList->ui32DataSegCount; i++)
+ {
+ if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+ (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+ {
+ *uiHostAddrOut = pvHostFWDataAddr;
+ goto found;
+ }
+ }
+
+ return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+ /* Direct Mem write to mapped memory */
+ ui32OffsetIn -= psSegArr[i].ui32SegStartAddr;
+ ui32OffsetIn += psSegArr[i].ui32FWMemOffset;
+
+ /* Add offset to pointer to FW allocation only if
+ * that allocation is available
+ */
+ if (*uiHostAddrOut)
+ {
+ *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+ }
+
+ return PVRSRV_OK;
+}
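+/*
+ * Worked example (illustrative only): when called with the META segment list
+ * above, an input address of 0x38881000 falls inside the "Local Shared and
+ * Data" segment (start 0x38880000, size 0x17000, FW mem offset 0), so
+ * *uiHostAddrOut becomes pvHostFWDataAddr + (0x38881000 - 0x38880000) + 0,
+ * i.e. pvHostFWDataAddr + 0x1000 (or stays NULL if no data allocation was
+ * supplied).
+ */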
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureSegID
+
+ @Description Configures a single segment of the Segment MMU
+ (base, limit and out_addr)
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr)
+ @Input ui32SegBase : Segment input base address (32 bit FW address)
+ @Input ui32SegLimit : Segment size
+ @Input ui32SegID : Segment ID
+ @Input pszName : Segment name
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureSegID(const void *hPrivate,
+ IMG_UINT64 ui64SegOutAddr,
+ IMG_UINT32 ui32SegBase,
+ IMG_UINT32 ui32SegLimit,
+ IMG_UINT32 ui32SegID,
+ const IMG_CHAR *pszName,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+ IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+ /* META segments have a minimum size */
+ IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+ RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+ /* the limit is an offset, therefore off = size - 1 */
+ ui32LimitOff -= 1;
+
+ RGXCommentLogInit(hPrivate,
+ "* FW %s - seg%d: meta_addr = 0x%08x, devv_addr = 0x%llx, limit = 0x%x",
+ pszName, ui32SegID,
+ ui32SegBase, (unsigned long long)ui64SegOutAddr,
+ ui32LimitOff);
+
+ ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+ *pui32BootConf++ = ui32SegBase;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+ *pui32BootConf++ = ui32LimitOff;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+ *pui32BootConf++ = ui32SegOutAddr0;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+ *pui32BootConf++ = ui32SegOutAddr1;
+
+ *ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureSegMMU
+
+ @Description Configures META's Segment MMU
+
+ @Input hPrivate : Implementation specific data
+ @Input psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input psFWDataDevVAddrBase : FW data base device virtual address
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureSegMMU(const void *hPrivate,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT64 ui64SegOutAddr;
+ IMG_UINT32 i;
+
+ PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+ /* Configure Segment MMU */
+ RGXCommentLogInit(hPrivate, "********** FW configure Segment MMU **********");
+
+ for (i = 0; i < RGXFW_META_NUM_DATA_SEGMENTS ; i++)
+ {
+ ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr |
+ RGXFW_SEGMMU_OUTADDR_TOP(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID)) +
+ asRGXMetaFWDataSegments[i].ui32FWMemOffset;
+
+ RGXFWConfigureSegID(hPrivate,
+ ui64SegOutAddr,
+ asRGXMetaFWDataSegments[i].ui32SegStartAddr,
+ asRGXMetaFWDataSegments[i].ui32SegAllocSize,
+ asRGXMetaFWDataSegments[i].ui32SegId,
+ asRGXMetaFWDataSegments[i].pszSegName,
+ ppui32BootConf); /*write the sequence to the bootldr */
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureMetaCaches
+
+ @Description Configure and enable the Meta instruction and data caches
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32NumThreads : Number of FW threads in use
+ @Input ui32MainThreadID : ID of the FW thread in use
+ (only meaningful if ui32NumThreads == 1)
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+ IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+ IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+ IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6)
+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31)
+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7)
+#define META_CR_MMCU_DCACHE_CTRL (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1)
+
+ RGXCommentLogInit(hPrivate, "********** Meta caches configuration *********");
+
+ /* Initialise I/Dcache settings */
+ ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
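+	/* A single-threaded firmware gives the active thread the whole local
+	 * cache; a two-threaded firmware splits it in half, with thread 1
+	 * taking the top half of the local address range. */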
+ if (ui32NumThreads == 1)
+ {
+ if (ui32MainThreadID == 0)
+ {
+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ }
+ else
+ {
+ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ }
+ }
+ else
+ {
+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+ }
+
+ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+ META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_LOCAL_EBCTRL,
+ META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+ /* Data cache partitioning thread 0 to 3 */
+ *pui32BootConf++ = META_CR_SYSC_DCPART(0);
+ *pui32BootConf++ = ui32DCacheT0;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(1);
+ *pui32BootConf++ = ui32DCacheT1;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(2);
+ *pui32BootConf++ = ui32DCacheT2;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(3);
+ *pui32BootConf++ = ui32DCacheT3;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(0), ui32DCacheT0);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(1), ui32DCacheT1);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(2), ui32DCacheT2);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+ /* Enable data cache hits */
+ *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_DCACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ /* Instruction cache partitioning thread 0 to 3 */
+ *pui32BootConf++ = META_CR_SYSC_ICPART(0);
+ *pui32BootConf++ = ui32ICacheT0;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(1);
+ *pui32BootConf++ = ui32ICacheT1;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(2);
+ *pui32BootConf++ = ui32ICacheT2;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(3);
+ *pui32BootConf++ = ui32ICacheT3;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(0), ui32ICacheT0);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(1), ui32ICacheT1);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(2), ui32ICacheT2);
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+ /* Enable instruction cache hits */
+ *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_ICACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ *pui32BootConf++ = 0x040000C0;
+ *pui32BootConf++ = 0;
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ 0x040000C0, 0);
+
+ *ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function ProcessLDRCommandStream
+
+ @Description Process the output of the Meta toolchain in the .LDR format,
+ copying code and data sections into their final location and
+ passing some information to the Meta bootloader.
+
+ @Input hPrivate : Implementation specific data
+ @Input pbLDR : Pointer to FW blob
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+ @Input pvHostFWCorememAddr : Pointer to FW coremem code
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+ const IMG_BYTE* pbLDR,
+ void* pvHostFWCodeAddr,
+ void* pvHostFWDataAddr,
+ void* pvHostFWCorememAddr,
+ IMG_UINT32 **ppui32BootConf)
+{
+ RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+ RGX_META_LDR_L1_DATA_BLK *psL1Data =
+ (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+ IMG_UINT32 ui32CorememCodeStartAddr = 0xFFFFFFFF;
+
+ RGXCommentLogInit(hPrivate, "**********************************************");
+ RGXCommentLogInit(hPrivate, "************** Begin LDR Parsing *************");
+ RGXCommentLogInit(hPrivate, "**********************************************");
+
+ while (psL1Data != NULL)
+ {
+ RGX_FW_SEGMENT_LIST sRGXFWSegList;
+ sRGXFWSegList.psRGXFWCodeSeg = asRGXMetaFWCodeSegments;
+ sRGXFWSegList.psRGXFWDataSeg = asRGXMetaFWDataSegments;
+ sRGXFWSegList.ui32CodeSegCount = RGXFW_META_NUM_CODE_SEGMENTS;
+ sRGXFWSegList.ui32DataSegCount = RGXFW_META_NUM_DATA_SEGMENTS;
+
+ if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+ {
+ /* Don't process comment blocks */
+ goto NextBlock;
+ }
+
+ switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+ {
+ case RGX_META_LDR_CMD_LOADMEM:
+ {
+ RGX_META_LDR_L2_DATA_BLK *psL2Block =
+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+ IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+ void *pvWriteAddr;
+ PVRSRV_ERROR eError;
+
+ if (RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize))
+ {
+ if (ui32Offset < ui32CorememCodeStartAddr)
+ {
+ if (ui32CorememCodeStartAddr == 0xFFFFFFFF)
+ {
+ /* Take the first coremem code address as the coremem code start address */
+ ui32CorememCodeStartAddr = ui32Offset;
+
+ /* Also check that there is a valid allocation for the coremem code */
+ if (pvHostFWCorememAddr == NULL)
+ {
+ RGXErrorLogInit(hPrivate,
+								"ProcessLDRCommandStream: Coremem code found "
+								"but no coremem allocation available!");
+
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+ else
+ {
+ /* The coremem addresses should be ordered in the LDR command stream */
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+ /* Copy coremem data to buffer. The FW copies it to the actual coremem */
+ ui32Offset -= ui32CorememCodeStartAddr;
+
+ RGXMemCopy(hPrivate,
+ (void*)((IMG_UINT8 *)pvHostFWCorememAddr + ui32Offset),
+ psL2Block->aui32BlockData,
+ ui32DataSize);
+ }
+ else
+ {
+ /* Global range is aliased to local range */
+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ eError = FindMMUSegment(ui32Offset,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLogInit(hPrivate,
+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+ ui32Offset, ui32DataSize);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemCopy(hPrivate,
+ pvWriteAddr,
+ psL2Block->aui32BlockData,
+ ui32DataSize);
+ }
+ }
+
+ break;
+ }
+ case RGX_META_LDR_CMD_LOADCORE:
+ case RGX_META_LDR_CMD_LOADMMREG:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ case RGX_META_LDR_CMD_START_THREADS:
+ {
+ /* Don't process this block */
+ break;
+ }
+ case RGX_META_LDR_CMD_ZEROMEM:
+ {
+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+ IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+ void *pvWriteAddr;
+ PVRSRV_ERROR eError;
+
+ if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+ {
+ /* cannot zero coremem directly */
+ break;
+ }
+
+ /* Global range is aliased to local range */
+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ eError = FindMMUSegment(ui32Offset,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLogInit(hPrivate,
+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+ ui32Offset, ui32ByteCount);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+ }
+
+ break;
+ }
+ case RGX_META_LDR_CMD_CONFIG:
+ {
+ RGX_META_LDR_L2_DATA_BLK *psL2Block =
+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+ RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+ IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+ IMG_UINT32 ui32CurrBlockSize = 0;
+
+ while (ui32L2BlockSize)
+ {
+ switch (psConfigCommand->ui32Type)
+ {
+ case RGX_META_LDR_CFG_PAUSE:
+ case RGX_META_LDR_CFG_READ:
+ {
+ ui32CurrBlockSize = 8;
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ case RGX_META_LDR_CFG_WRITE:
+ {
+ IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+ IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1];
+
+ /* Only write to bootloader if we got a valid
+ * pointer to the FW code allocation
+ */
+ if (pui32BootConf)
+ {
+ /* Do register write */
+ *pui32BootConf++ = ui32RegisterOffset;
+ *pui32BootConf++ = ui32RegisterValue;
+ }
+
+ RGXCommentLogInit(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ ui32RegisterOffset, ui32RegisterValue);
+
+ ui32CurrBlockSize = 12;
+ break;
+ }
+ case RGX_META_LDR_CFG_MEMSET:
+ case RGX_META_LDR_CFG_MEMCHECK:
+ {
+ ui32CurrBlockSize = 20;
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ default:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+ ui32L2BlockSize -= ui32CurrBlockSize;
+ psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+ }
+
+ break;
+ }
+ default:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+NextBlock:
+
+ if (psL1Data->ui32Next == 0xFFFFFFFF)
+ {
+ psL1Data = NULL;
+ }
+ else
+ {
+ psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+ }
+ }
+
+ *ppui32BootConf = pui32BootConf;
+
+ RGXCommentLogInit(hPrivate, "**********************************************");
+ RGXCommentLogInit(hPrivate, "************** End Loader Parsing ************");
+ RGXCommentLogInit(hPrivate, "**********************************************");
+
+ return PVRSRV_OK;
+}
+#endif /* RGX_FEATURE_META */
+
+#if defined(RGX_FEATURE_MIPS) || defined(SUPPORT_KERNEL_SRVINIT)
+/*!
+*******************************************************************************
+
+ @Function ProcessELFCommandStream
+
+ @Description Process the output of the Mips toolchain in the .ELF format,
+ copying code and data sections into their final location.
+
+ @Input hPrivate : Implementation specific data
+ @Input pbELF : Pointer to FW blob
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+ const IMG_BYTE *pbELF,
+ void *pvHostFWCodeAddr,
+ void *pvHostFWDataAddr)
+{
+ IMG_UINT32 ui32Entry;
+ RGX_MIPS_ELF_HDR *psHeader = (RGX_MIPS_ELF_HDR *)pbELF;
+ RGX_MIPS_ELF_PROGRAM_HDR *psProgramHeader =
+ (RGX_MIPS_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+ PVRSRV_ERROR eError;
+
+ for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+ {
+ void *pvWriteAddr;
+ RGX_FW_SEGMENT_LIST sRGXFWSegList;
+ sRGXFWSegList.psRGXFWCodeSeg = asRGXMipsFWCodeSegments;
+ sRGXFWSegList.psRGXFWDataSeg = asRGXMipsFWDataSegments;
+ sRGXFWSegList.ui32CodeSegCount = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+ sRGXFWSegList.ui32DataSegCount = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;
+
+ eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLogInit(hPrivate,
+ "%s: Addr 0x%x (size: %d) not found in any segment",__func__,
+ psProgramHeader->ui32Pvaddr,
+ psProgramHeader->ui32Pfilesz);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemCopy(hPrivate,
+ pvWriteAddr,
+ (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
+ psProgramHeader->ui32Pfilesz);
+
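+ /* Zero the tail of the segment (ui32Pmemsz - ui32Pfilesz), i.e. the
+ * portion (such as .bss) that occupies memory but has no bytes in the
+ * file image.
+ */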
+ RGXMemSet(hPrivate,
+ (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
+ 0,
+ psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+#endif /* RGX_FEATURE_MIPS || SUPPORT_KERNEL_SRVINIT */
+
+
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWCorememAllocSize)
+{
+ IMG_UINT32 i, ui32NumCodeSegments = 0, ui32NumDataSegments = 0;
+ RGX_FW_SEGMENT *pasRGXFWCodeSegments = NULL, *pasRGXFWDataSegments = NULL;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ IMG_BOOL bMIPS = RGXDeviceHasFeatureInit(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+#elif defined(RGX_FEATURE_MIPS)
+ IMG_BOOL bMIPS = IMG_TRUE;
+#else
+ IMG_BOOL bMIPS = IMG_FALSE;
+#endif
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+ if (!bMIPS)
+ {
+ pasRGXFWCodeSegments = asRGXMetaFWCodeSegments;
+ pasRGXFWDataSegments = asRGXMetaFWDataSegments;
+ ui32NumCodeSegments = RGXFW_META_NUM_CODE_SEGMENTS;
+ ui32NumDataSegments = RGXFW_META_NUM_DATA_SEGMENTS;
+ }
+#endif
+
+#if defined(RGX_FEATURE_MIPS) || defined(SUPPORT_KERNEL_SRVINIT)
+ if (bMIPS)
+ {
+ pasRGXFWCodeSegments = asRGXMipsFWCodeSegments;
+ pasRGXFWDataSegments = asRGXMipsFWDataSegments;
+ ui32NumCodeSegments = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+ ui32NumDataSegments = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+ }
+#endif
+
+ *puiFWCodeAllocSize = 0;
+ *puiFWDataAllocSize = 0;
+ *puiFWCorememAllocSize = 0;
+
+ /* Calculate how much memory the FW needs for its code and data segments */
+
+ for(i = 0; i < ui32NumCodeSegments; i++) {
+ *puiFWCodeAllocSize += ((pasRGXFWCodeSegments + i)->ui32SegAllocSize);
+ }
+
+ for(i = 0; i < ui32NumDataSegments; i++) {
+ *puiFWDataAllocSize += ((pasRGXFWDataSegments + i)->ui32SegAllocSize);
+ }
+
+ *puiFWCorememAllocSize = RGXGetFWCorememSize(hPrivate);
+
+ if (*puiFWCorememAllocSize != 0)
+ {
+ *puiFWCorememAllocSize = *puiFWCorememAllocSize - RGX_META_COREMEM_DATA_SIZE;
+ }
+
+ if (bMIPS)
+ {
+ if ((*puiFWCodeAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+ {
+ RGXErrorLogInit(hPrivate,
+ "%s: The MIPS FW code allocation is not"
+ " a multiple of the page size!", __func__);
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ if ((*puiFWDataAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+ {
+ RGXErrorLogInit(hPrivate,
+ "%s: The MIPS FW data allocation is not"
+ " a multiple of the page size!", __func__);
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ void *pvFWCode,
+ void *pvFWData,
+ void *pvFWCorememCode,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ IMG_BOOL bMIPS = RGXDeviceHasFeatureInit(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+#elif defined(RGX_FEATURE_MIPS)
+ IMG_BOOL bMIPS = IMG_TRUE;
+#else
+ IMG_BOOL bMIPS = IMG_FALSE;
+#endif
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+ if (!bMIPS)
+ {
+ IMG_UINT32 *pui32BootConf = NULL;
+ /* Skip bootloader configuration if a pointer to the FW code
+ * allocation is not available
+ */
+ if (pvFWCode)
+ {
+ /* This variable points to the bootloader code which is mostly
+ * a sequence of <register address,register value> pairs
+ */
+ pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;
+
+ /* Slave port and JTAG accesses are privileged */
+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;
+
+ RGXFWConfigureSegMMU(hPrivate,
+ psFWCodeDevVAddrBase,
+ psFWDataDevVAddrBase,
+ &pui32BootConf);
+ }
+
+ /* Process FW image data stream */
+ eError = ProcessLDRCommandStream(hPrivate,
+ pbRGXFirmware,
+ pvFWCode,
+ pvFWData,
+ pvFWCorememCode,
+ &pui32BootConf);
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLogInit(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+ return eError;
+ }
+
+ /* Skip bootloader configuration if a pointer to the FW code
+ * allocation is not available
+ */
+ if (pvFWCode)
+ {
+ if ((ui32NumThreads == 0) || (ui32NumThreads > 2) || (ui32MainThreadID >= 2))
+ {
+ RGXErrorLogInit(hPrivate,
+ "ProcessFWImage: Wrong Meta threads configuration, using one thread only");
+
+ ui32NumThreads = 1;
+ ui32MainThreadID = 0;
+ }
+
+ RGXFWConfigureMetaCaches(hPrivate,
+ ui32NumThreads,
+ ui32MainThreadID,
+ &pui32BootConf);
+
+ /* Signal the end of the conf sequence */
+ *pui32BootConf++ = 0x0;
+ *pui32BootConf++ = 0x0;
+
+ /* The FW main argv arguments start here */
+ *pui32BootConf++ = psRGXFwInit->ui32Addr;
+
+ if ((RGXGetFWCorememSize(hPrivate) != 0) && (psFWCorememFWAddr != NULL))
+ {
+ *pui32BootConf++ = psFWCorememFWAddr->ui32Addr;
+ }
+ else
+ {
+ *pui32BootConf++ = 0;
+ }
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if (RGXDeviceHasFeatureInit(hPrivate, RGX_FEATURE_META_DMA_BIT_MASK))
+#elif defined(RGX_FEATURE_META_DMA)
+ if (IMG_TRUE)
+#else
+ if (IMG_FALSE)
+#endif
+ {
+ *pui32BootConf++ = (IMG_UINT32) (psFWCorememDevVAddrBase->uiAddr >> 32);
+ *pui32BootConf++ = (IMG_UINT32) psFWCorememDevVAddrBase->uiAddr;
+ }
+ else
+ {
+ *pui32BootConf++ = 0;
+ *pui32BootConf++ = 0;
+ }
+
+ }
+ }
+#endif
+
+#if defined(RGX_FEATURE_MIPS) || defined(SUPPORT_KERNEL_SRVINIT)
+ if (bMIPS)
+ {
+ /* Process FW image data stream */
+ eError = ProcessELFCommandStream(hPrivate,
+ pbRGXFirmware,
+ pvFWCode,
+ pvFWData);
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLogInit(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+ return eError;
+ }
+
+ PVR_UNREFERENCED_PARAMETER(pvFWData); /* No need to touch the data segment in MIPS */
+ PVR_UNREFERENCED_PARAMETER(pvFWCorememCode); /* Coremem N/A in MIPS */
+ PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+ PVR_UNREFERENCED_PARAMETER(psFWDataDevVAddrBase);
+ PVR_UNREFERENCED_PARAMETER(psFWCorememDevVAddrBase);
+ PVR_UNREFERENCED_PARAMETER(psFWCorememFWAddr);
+ PVR_UNREFERENCED_PARAMETER(psRGXFwInit);
+ PVR_UNREFERENCED_PARAMETER(ui32NumThreads);
+ PVR_UNREFERENCED_PARAMETER(ui32MainThreadID);
+ }
+#endif
+
+ return eError;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for Services Firmware image utilities used at init time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for Services Firmware image utilities used at init time
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWIMAGEUTILS_H__
+#define __RGXFWIMAGEUTILS_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetFWImageAllocSize
+
+ @Description Return size of Firmware code/data/coremem code allocations
+
+ @Input hPrivate : Implementation specific data
+ @Output puiFWCodeAllocSize : Returned code size
+ @Output puiFWDataAllocSize : Returned data size
+ @Output puiFWCorememCodeAllocSize : Returned coremem code size (0 if N/A)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize);
+
+/*!
+*******************************************************************************
+
+ @Function RGXProcessFWImage
+
+ @Description Process the Firmware binary blob copying code and data
+ sections into their final location and passing some
+ information to the Firmware bootloader.
+ If a pointer to the final memory location for FW code or data
+ is not valid (NULL) then the relative section will not be
+ processed.
+
+ @Input hPrivate : Implementation specific data
+ @Input pbRGXFirmware : Pointer to FW blob
+ @Input pvFWCode : Pointer to FW code
+ @Input pvFWData : Pointer to FW data
+ @Input pvFWCorememCode : Pointer to FW coremem code
+ @Input psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input psFWDataDevVAddrBase : FW data base device virtual address
+ @Input psFWCorememDevVAddrBase : FW coremem code base device virtual address
+ @Input psFWCorememFWAddr : FW coremem code allocation 32 bit (FW) address
+ @Input psRGXFwInit : FW init structure 32 bit (FW) address
+ @Input ui32NumThreads : Number of FW threads in use
+ @Input ui32MainThreadID : ID of the FW thread in use
+ (only meaningful if ui32NumThreads == 1)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ void *pvFWCode,
+ void *pvFWData,
+ void *pvFWCorememCode,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID);
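+
+/* Typical call sequence (sketch only; variable names are placeholders):
+ * the caller queries the allocation sizes first, allocates the FW code,
+ * data and coremem buffers, and only then processes the FW image.
+ *
+ *   eError = RGXGetFWImageAllocSize(hPrivate, &uiCodeSize, &uiDataSize, &uiCorememSize);
+ *   ... allocate FW code/data/coremem buffers of the returned sizes ...
+ *   eError = RGXProcessFWImage(hPrivate, pbFW,
+ *                              pvCode, pvData, pvCorememCode,
+ *                              &sCodeDevVAddr, &sDataDevVAddr, &sCorememDevVAddr,
+ *                              &sCorememFWAddr, &sRGXFwInit,
+ *                              ui32NumThreads, ui32MainThreadID);
+ */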
+
+#endif /* __RGXFWIMAGEUTILS_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services firmware load and access routines for Linux
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "device.h"
+#include "module_common.h"
+#include "rgxfwload.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+
+struct RGXFW
+{
+ const struct firmware sFW;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED)
+
+/* The Linux kernel does not support the RSA PSS padding mode. It only
+ * supports the legacy PKCS#1 padding mode.
+ */
+#if defined(RGX_FW_PKCS1_PSS_PADDING)
+#error Linux does not support verification of RSA PSS padded signatures
+#endif
+
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <crypto/hash.h>
+
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "signfw.h"
+
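+/* Overview of the verification below: hash the FW image up to the trailing
+ * signature block, read the RSA signature into an MPI, look up the signing
+ * key on the system trusted keyring by its "<signer>: <key ID>" description
+ * and verify the signature against the computed digest.
+ */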
+static bool VerifyFirmware(const struct firmware *psFW)
+{
+ struct FirmwareSignatureHeader *psHeader;
+ struct public_key_signature *psPKS;
+ unsigned char *szKeyID, *pcKeyID;
+ size_t uDigestSize, uDescSize;
+ void *pvSignature, *pvSigner;
+ struct crypto_shash *psTFM;
+ struct shash_desc *psDesc;
+ uint32_t ui32SignatureLen;
+ bool bVerified = false;
+ key_ref_t hKey;
+ uint8_t i;
+ int res;
+
+ if (psFW->size < FW_SIGN_BACKWARDS_OFFSET)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+ __func__, psFW->size));
+ goto err_release_firmware;
+ }
+
+ psHeader = (struct FirmwareSignatureHeader *)
+ (psFW->data + (psFW->size - FW_SIGN_BACKWARDS_OFFSET));
+
+ /* All derived from u8 so can't be exploited to flow out of this page */
+ pvSigner = (u8 *)psHeader + sizeof(struct FirmwareSignatureHeader);
+ pcKeyID = (unsigned char *)((u8 *)pvSigner + psHeader->ui8SignerLen);
+ pvSignature = (u8 *)pcKeyID + psHeader->ui8KeyIDLen;
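+
+ /* Trailing block layout assumed above, within the last
+ * FW_SIGN_BACKWARDS_OFFSET bytes of the image:
+ * [FirmwareSignatureHeader][signer name][key ID][signature]
+ */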
+
+ /* We cannot update KERNEL_RO in-place, so we must copy the len */
+ ui32SignatureLen = ntohl(psHeader->ui32SignatureLen);
+
+ if (psHeader->ui8Algo >= PKEY_ALGO__LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Public key algorithm %u is not supported",
+ __func__, psHeader->ui8Algo));
+ goto err_release_firmware;
+ }
+
+ if (psHeader->ui8HashAlgo >= PKEY_HASH__LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Hash algorithm %u is not supported",
+ __func__, psHeader->ui8HashAlgo));
+ goto err_release_firmware;
+ }
+
+ if (psHeader->ui8IDType != PKEY_ID_X509)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Only asymmetric X.509 PKI certificates "
+ "are supported", __func__));
+ goto err_release_firmware;
+ }
+
+ /* Generate a hash of the fw data (including the padding) */
+
+ psTFM = crypto_alloc_shash(hash_algo_name[psHeader->ui8HashAlgo], 0, 0);
+ if (IS_ERR(psTFM))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_alloc_shash() failed (%ld)",
+ __func__, PTR_ERR(psTFM)));
+ goto err_release_firmware;
+ }
+
+ uDescSize = crypto_shash_descsize(psTFM) + sizeof(*psDesc);
+ uDigestSize = crypto_shash_digestsize(psTFM);
+
+ psPKS = kzalloc(sizeof(*psPKS) + uDescSize + uDigestSize, GFP_KERNEL);
+ if (!psPKS)
+ goto err_free_crypto_shash;
+
+ psDesc = (struct shash_desc *)((u8 *)psPKS + sizeof(*psPKS));
+ psDesc->tfm = psTFM;
+ psDesc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ psPKS->pkey_algo = psHeader->ui8Algo;
+ psPKS->pkey_hash_algo = psHeader->ui8HashAlgo;
+
+ psPKS->digest = (u8 *)psPKS + sizeof(*psPKS) + uDescSize;
+ psPKS->digest_size = uDigestSize;
+
+ res = crypto_shash_init(psDesc);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_init() failed (%d)",
+ __func__, res));
+ goto err_free_pks;
+ }
+
+ res = crypto_shash_finup(psDesc, psFW->data, psFW->size - FW_SIGN_BACKWARDS_OFFSET,
+ psPKS->digest);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_finup() failed (%d)",
+ __func__, res));
+ goto err_free_pks;
+ }
+
+ /* Populate the MPI with the signature payload */
+
+ psPKS->nr_mpi = 1;
+ psPKS->rsa.s = mpi_read_raw_data(pvSignature, ui32SignatureLen);
+ if (!psPKS->rsa.s)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: mpi_read_raw_data() failed", __func__));
+ goto err_free_pks;
+ }
+
+ /* Look up the key we'll use to verify this signature */
+
+ szKeyID = kmalloc(psHeader->ui8SignerLen + 2 +
+ psHeader->ui8KeyIDLen * 2 + 1, GFP_KERNEL);
+ if (!szKeyID)
+ goto err_free_mpi;
+
+ memcpy(szKeyID, pvSigner, psHeader->ui8SignerLen);
+
+ szKeyID[psHeader->ui8SignerLen + 0] = ':';
+ szKeyID[psHeader->ui8SignerLen + 1] = ' ';
+
+ for (i = 0; i < psHeader->ui8KeyIDLen; i++)
+ sprintf(&szKeyID[psHeader->ui8SignerLen + 2 + i * 2],
+ "%02x", pcKeyID[i]);
+
+ szKeyID[psHeader->ui8SignerLen + 2 + psHeader->ui8KeyIDLen * 2] = 0;
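+
+ /* The description built above has the form "<signer>: <key ID hex>",
+ * e.g. (hypothetical values) "Example Vendor: 0a1b2c3d", and is matched
+ * against the asymmetric key descriptions on the system trusted keyring.
+ */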
+
+ hKey = keyring_search(make_key_ref(system_trusted_keyring, 1),
+ &key_type_asymmetric, szKeyID);
+ if (IS_ERR(hKey))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for unknown key '%s' (%ld)",
+ szKeyID, PTR_ERR(hKey)));
+ goto err_free_keyid_string;
+ }
+
+ res = verify_signature(key_ref_to_ptr(hKey), psPKS);
+ if (res)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware digital signature verification "
+ "failed (%d)", __func__, res));
+ goto err_put_key;
+ }
+
+ PVR_LOG(("Digital signature for '%s' verified successfully.",
+ RGX_FW_FILENAME));
+ bVerified = true;
+err_put_key:
+ key_put(key_ref_to_ptr(hKey));
+err_free_keyid_string:
+ kfree(szKeyID);
+err_free_mpi:
+ mpi_free(psPKS->rsa.s);
+err_free_pks:
+ kfree(psPKS);
+err_free_crypto_shash:
+ crypto_free_shash(psTFM);
+err_release_firmware:
+ return bVerified;
+}
+
+#else /* defined(RGX_FW_SIGNED) */
+
+static inline bool VerifyFirmware(const struct firmware *psFW)
+{
+ return true;
+}
+
+#endif /* defined(RGX_FW_SIGNED) */
+
+IMG_INTERNAL struct RGXFW *
+RGXLoadFirmware(SHARED_DEV_CONNECTION psDeviceNode, const IMG_CHAR *pszBVNCString, const IMG_CHAR *pszBVpNCString)
+{
+ const struct firmware *psFW;
+ int res;
+
+ if(pszBVNCString != NULL)
+ {
+ res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice);
+ if (res != 0)
+ {
+ if(pszBVpNCString != NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d), trying '%s'",
+ __func__, pszBVNCString, res, pszBVpNCString));
+ res = request_firmware(&psFW, pszBVpNCString, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ if (res != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d), trying '%s'",
+ __func__, pszBVpNCString, res, RGX_FW_FILENAME));
+ res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ }
+ }
+ else
+ {
+ res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ if (res != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed (%d)",
+ __func__, RGX_FW_FILENAME, res));
+ return NULL;
+ }
+
+ if (!VerifyFirmware(psFW))
+ {
+ release_firmware(psFW);
+ return NULL;
+ }
+
+ return (struct RGXFW *)psFW;
+}
+
+IMG_INTERNAL void
+RGXUnloadFirmware(struct RGXFW *psRGXFW)
+{
+ const struct firmware *psFW = &psRGXFW->sFW;
+
+ release_firmware(psFW);
+}
+
+IMG_INTERNAL size_t
+RGXFirmwareSize(struct RGXFW *psRGXFW)
+{
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_UNREFERENCED_PARAMETER(psRGXFW);
+ return 0;
+#else
+ const struct firmware *psFW = &psRGXFW->sFW;
+
+ return psFW->size;
+#endif
+}
+
+IMG_INTERNAL const void *
+RGXFirmwareData(struct RGXFW *psRGXFW)
+{
+ const struct firmware *psFW = &psRGXFW->sFW;
+
+ return psFW->data;
+}
+
+/******************************************************************************
+ End of file (rgxfwload.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services firmware load and access routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWLOAD_H__
+#define __RGXFWLOAD_H__
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "device_connection.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+struct RGXFW;
+
+IMG_INTERNAL struct RGXFW *RGXLoadFirmware(SHARED_DEV_CONNECTION psDeviceNode, const IMG_CHAR *pszBVNCString, const IMG_CHAR *pszBVpNCString);
+
+IMG_INTERNAL void RGXUnloadFirmware(struct RGXFW *psRGXFW);
+
+IMG_INTERNAL const void *RGXFirmwareData(struct RGXFW *psRGXFW);
+IMG_INTERNAL size_t RGXFirmwareSize(struct RGXFW *psRGXFW);
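+
+/* Typical usage (sketch only):
+ *
+ *   struct RGXFW *psFW = RGXLoadFirmware(psDeviceNode, pszBVNCString, pszBVpNCString);
+ *   if (psFW != NULL)
+ *   {
+ *       const void *pvData = RGXFirmwareData(psFW);
+ *       size_t uiSize = RGXFirmwareSize(psFW);
+ *       ... process the FW image ...
+ *       RGXUnloadFirmware(psFW);
+ *   }
+ */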
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __RGXFWLOAD_H__ */
+
+/*****************************************************************************
+ End of file (rgxfwload.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Rogue firmware utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Rogue firmware utility routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "lists.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+#include "rgxfwutils_vz.h"
+#endif
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxpower.h"
+#include "rgxray.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxta3d.h"
+#include "rgxutils.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "physmem_osmem.h"
+#endif
+
+#ifdef __linux__
+#include <linux/kernel.h> // sprintf
+#include <linux/string.h> // strncpy, strlen
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+/* Kernel CCB length */
+/* The KCCB is sized down to reduce the risk of flooding and overflowing the
+ * FW kick queue when multiple OSes are running */
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_ONLY (6)
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_FEAT_GPU_VIRTUALISATION (7)
+
+
+/* Firmware CCB length */
+#if defined(SUPPORT_PDVFS)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5)
+#endif
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7)
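+
+/* All CCB sizes above are expressed as log2 of the number of commands,
+ * e.g. a value of 7 gives a CCB of 2^7 = 128 command slots.
+ */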
+
+typedef struct
+{
+ RGXFWIF_KCCB_CMD sKCCBcmd;
+ DLLIST_NODE sListNode;
+ PDUMP_FLAGS_T uiPdumpFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_DM eDM;
+} RGX_DEFERRED_KCCB_CMD;
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+ "FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+ "generates WRW commands for loading the PID values");
+#endif
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_INIT* psRGXFWInit)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+ IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize);
+
+ PVR_DPF_ENTERED;
+
+ eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+ 1,
+ ui32CacheLineSize,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ "SLC3 Fence WA",
+ ppsSLC3FenceMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+ psDevInfo->psFirmwareHeap,
+ &psRGXFWInit->sSLC3FenceDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ DevmemFwFree(psDevInfo, *ppsSLC3FenceMemDesc);
+ *ppsSLC3FenceMemDesc = NULL;
+ }
+
+ PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+ DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+ if (psSLC3FenceMemDesc)
+ {
+ DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+ DevmemFree(psSLC3FenceMemDesc);
+ }
+}
+#endif
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+ /* ensure memory is flushed before kicking MTS */
+ OSWriteMemoryBarrier();
+
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+ /* ensure the MTS kick goes through before continuing */
+ OSMemoryBarrier();
+}
+
+
+/*!
+*******************************************************************************
+ @Function RGXFWSetupSignatureChecks
+ @Description Allocate the signature check buffer and initialise the FW signature buffer control
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+ DEVMEM_MEMDESC** ppsSigChecksMemDesc,
+ IMG_UINT32 ui32SigChecksBufSize,
+ RGXFWIF_SIGBUF_CTL* psSigBufCtl,
+ const IMG_CHAR* pszBufferName)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for %s signature checks", pszBufferName);
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32SigChecksBufSize,
+ uiMemAllocFlags,
+ "FwSignatureChecks",
+ ppsSigChecksMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for signature checks (%u)",
+ ui32SigChecksBufSize,
+ eError));
+ return eError;
+ }
+
+ /* Prepare the pointer for the fw to access that memory */
+ RGXSetFirmwareAddress(&psSigBufCtl->sBuffer,
+ *ppsSigChecksMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ DevmemPDumpLoadMem( *ppsSigChecksMemDesc,
+ 0,
+ ui32SigChecksBufSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+
+ return PVRSRV_OK;
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+/*!
+*******************************************************************************
+ @Function RGXFWSetupAlignChecks
+ @Description This function allocates and fills the memory needed for the
+ alignment checks of the UM and KM structures shared with the
+ firmware. The format of the data in the memory is as follows:
+ <number of elements in the KM array>
+ <array of KM structures' sizes and members' offsets>
+ <number of elements in the UM array>
+ <array of UM structures' sizes and members' offsets>
+ The UM array is passed from the user side. If the
+ SUPPORT_KERNEL_SRVINIT macro is defined, the firmware is
+ responsible for filling this part of the memory. In that
+ case the check of the UM structures is performed
+ by the host driver when a client connects.
+ If the macro is not defined the client driver fills the memory
+ and the firmware checks for the alignment of all structures.
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+ RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength)
+{
+ IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+ IMG_UINT32 ui32RGXFWAlignChecksTotal;
+ IMG_UINT32* paui32AlignChecks;
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ /* In this case we don't know the number of elements in UM array.
+ * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX. */
+ PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0);
+ ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+ + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+ + 2 * sizeof(IMG_UINT32);
+#else
+ /* '2 * sizeof(IMG_UINT32)' is for the sizes of the KM and UM arrays. */
+ PVR_ASSERT(ui32RGXFWAlignChecksArrLength != 0);
+ ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+ + ui32RGXFWAlignChecksArrLength * sizeof(IMG_UINT32)
+ + 2 * sizeof(IMG_UINT32);
+#endif
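+
+ /* Resulting layout of the allocation, one IMG_UINT32 per entry:
+ * [KM entry count][KM sizes/offsets...][UM entry count][UM sizes/offsets...]
+ */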
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for alignment checks");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32RGXFWAlignChecksTotal,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+#endif
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_UNCACHED,
+ "FwAlignmentChecks",
+ &psDevInfo->psRGXFWAlignChecksMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for alignment checks (%u)",
+ ui32RGXFWAlignChecksTotal,
+ eError));
+ goto failAlloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+ (void **)&paui32AlignChecks);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel addr for alignment checks (%u)",
+ eError));
+ goto failAqCpuAddr;
+ }
+
+ /* Copy the values */
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ *paui32AlignChecks++ = sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+ OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+ paui32AlignChecks += sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+
+ *paui32AlignChecks = 0;
+#else
+ *paui32AlignChecks++ = sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+ OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+ paui32AlignChecks += sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+
+ *paui32AlignChecks++ = ui32RGXFWAlignChecksArrLength;
+ OSDeviceMemCopy(paui32AlignChecks, pui32RGXFWAlignChecks, ui32RGXFWAlignChecksArrLength * sizeof(IMG_UINT32));
+#endif
+
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc,
+ 0,
+ ui32RGXFWAlignChecksTotal,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Prepare the pointer for the fw to access that memory */
+ RGXSetFirmwareAddress(psAlignChecksDevFW,
+ psDevInfo->psRGXFWAlignChecksMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ return PVRSRV_OK;
+
+failAqCpuAddr:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+ psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+failAlloc:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+ if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+ psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+ }
+}
+#endif
+
+
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest,
+ DEVMEM_MEMDESC *psSrc,
+ IMG_UINT32 uiExtraOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR psDevVirtAddr;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT64 ui64ErnsBrns = 0;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ IMG_UINT32 ui32Offset;
+ IMG_BOOL bCachedInMETA;
+ DEVMEM_FLAGS_T uiDevFlags;
+ IMG_UINT32 uiGPUCacheMode;
+
+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Convert to an address in META memmap */
+ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_HEAP_BASE;
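+
+ /* The META view of the allocation is therefore (sketch):
+ * RGXFW_SEGMMU_DATA_BASE_ADDRESS + (device virtual address - RGX_FIRMWARE_HEAP_BASE),
+ * with the META and SLC cache mode flags OR-ed in below.
+ */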
+
+ /* Check in the devmem flags whether this memory is cached/uncached */
+ DevmemGetFlags(psSrc, &uiDevFlags);
+
+ /* Honour the META cache flags */
+ bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+ /* Honour the SLC cache flags */
+ uiGPUCacheMode = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags);
+
+ ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+ if (bCachedInMETA)
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+ }
+ else
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+ }
+
+ if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+ }
+ else
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+ }
+ ppDest->ui32Addr = ui32Offset;
+ }
+ else
+ {
+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
+ }
+
+ if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+ {
+ DevmemReleaseDevVirtAddr(psSrc);
+ }
+
+}
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest,
+ DEVMEM_MEMDESC *psSrcMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr,
+ IMG_UINT32 uiOffset)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevVirtAddr;
+
+ eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+ psDest->psDevVirtAddr.uiAddr += uiOffset;
+ psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+ DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+ DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_CLIENT_CCB *psClientCCB;
+ DEVMEM_MEMDESC *psClientCCBMemDesc;
+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+ IMG_BOOL bCommonContextMemProvided;
+ IMG_UINT32 ui32ContextID;
+ DLLIST_NODE sListNode;
+ RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+};
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGXFWIF_DM eDM,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ DEVMEM_MEMDESC *psContextStateMemDesc,
+ IMG_UINT32 ui32CCBAllocSize,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+ IMG_UINT32 ui32FWCommonContextOffset;
+ IMG_UINT8 *pui8Ptr;
+ PVRSRV_ERROR eError;
+
+ /*
+ Allocate all the resources that are required
+ */
+ psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+ if (psServerCommonContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psServerCommonContext->psDevInfo = psDevInfo;
+
+ if (psAllocatedMemDesc)
+ {
+ PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ ui32AllocatedOffset);
+ ui32FWCommonContextOffset = ui32AllocatedOffset;
+ psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+ psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+ }
+ else
+ {
+ /* Allocate device memory for the firmware context */
+ PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWCommonContext),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwContext",
+ &psServerCommonContext->psFWCommonContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s : Failed to allocate firmware %s context (%s)",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_contextalloc;
+ }
+ ui32FWCommonContextOffset = 0;
+ psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+ }
+
+ /* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+ psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+ psServerCommonContext->ui32LastResetJobRef = 0;
+ psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++;
+
+ /* Allocate the client CCB */
+ eError = RGXCreateCCB(psDevInfo,
+ ui32CCBAllocSize,
+ psConnection,
+ eRGXCCBRequestor,
+ psServerCommonContext,
+ &psServerCommonContext->psClientCCB,
+ &psServerCommonContext->psClientCCBMemDesc,
+ &psServerCommonContext->psClientCCBCtrlMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for %s context(%s)",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_allocateccb;
+ }
+
+ /*
+ Temporarily map the firmware context to the kernel and init it
+ */
+ eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+ (void **)&pui8Ptr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s context (%s)to CPU",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_cpuvirtacquire;
+ }
+
+ psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+ psFWCommonContext->eDM = eDM;
+
+ /* Set the firmware CCB device addresses in the firmware common context */
+ RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+ psServerCommonContext->psClientCCBMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+ psServerCommonContext->psClientCCBCtrlMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK)
+ {
+ RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+ psServerCommonContext->psClientCCBMemDesc,
+ &psFWCommonContext->psCCB,
+ 0);
+ }
+
+ /* Set the memory context device address */
+ psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+ RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+ psFWMemContextMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Set the framework register updates address */
+ psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+ if (psInfo->psFWFrameworkMemDesc != NULL)
+ {
+ RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+ psInfo->psFWFrameworkMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ }
+ else
+ {
+ /* This should never be touched in contexts without a framework
+ * memdesc, but ensure it is zero so we see crashes if it is.
+ */
+ psFWCommonContext->psRFCmd.ui32Addr = 0;
+ }
+
+ psFWCommonContext->ui32Priority = ui32Priority;
+ psFWCommonContext->ui32PrioritySeqNum = 0;
+
+ if(psInfo->psMCUFenceAddr != NULL)
+ {
+ psFWCommonContext->ui64MCUFenceAddr = psInfo->psMCUFenceAddr->uiAddr;
+ }
+
+ if ((psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat == 2) &&
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+ {
+ if (eDM == RGXFWIF_DM_CDM)
+ {
+ if(psInfo->psResumeSignalAddr != NULL)
+ {
+ psFWCommonContext->ui64ResumeSignalAddr = psInfo->psResumeSignalAddr->uiAddr;
+ }
+ }
+ }
+
+ /* Store references to the Server Common Context and PID for notifications back from the FW. */
+ psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+ psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM();
+
+ /* Set the firmware GPU context state buffer */
+ psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+ if (psContextStateMemDesc)
+ {
+ RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+ psContextStateMemDesc,
+ 0,
+ RFW_FWADDR_FLAG_NONE);
+ }
+
+ /*
+ * Dump the created context
+ */
+ PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+ DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+ ui32FWCommonContextOffset,
+ sizeof(*psFWCommonContext),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* We've finished the setup so release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+ /* Map this allocation into the FW */
+ RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+ psServerCommonContext->psFWCommonContextMemDesc,
+ ui32FWCommonContextOffset,
+ RFW_FWADDR_FLAG_NONE);
+
+#if defined(LINUX)
+ {
+ IMG_UINT32 ui32FWAddr = 0;
+ switch (eDM) {
+ case RGXFWIF_DM_TA:
+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+ break;
+ case RGXFWIF_DM_3D:
+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+ break;
+ default:
+ ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+ break;
+ }
+
+ trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ ui32FWAddr);
+ }
+#endif
+ /* Add the node to the list when finalised */
+ dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+
+ *ppsServerCommonContext = psServerCommonContext;
+ return PVRSRV_OK;
+
+fail_allocateccb:
+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+fail_contextalloc:
+ OSFreeMem(psServerCommonContext);
+fail_alloc:
+ return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+ /* Remove the context from the list of all contexts. */
+ dllist_remove_node(&psServerCommonContext->sListNode);
+
+ /*
+ Unmap the context itself and then all its resources
+ */
+
+ /* Unmap the FW common context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+ /* Unmap the context state buffer (if there was one) */
+ if (psServerCommonContext->psContextStateMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+ }
+ /* Unmap the framework buffer */
+ if (psServerCommonContext->psFWFrameworkMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+ }
+ /* Unmap client CCB and CCB control */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+ /* Unmap the memory context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+ /* Destroy the client CCB */
+ RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+ /* Free the FW common context (if there was one) */
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwFree(psServerCommonContext->psDevInfo,
+ psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+ /* Free the hosts representation of the common context */
+ OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->psClientCCB;
+}
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+
+ PVR_ASSERT(psServerCommonContext != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ /* Take the most recent reason & job ref and reset for next time... */
+ eLastResetReason = psServerCommonContext->eLastResetReason;
+ *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+ psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+ psServerCommonContext->ui32LastResetJobRef = 0;
+
+ return eLastResetReason;
+}
+
+/*!
+*******************************************************************************
+ @Function RGXFreeKernelCCB
+ @Description Free the kernel CCB
+ @Input psDevInfo
+
+ @Return void
+******************************************************************************/
+static void RGXFreeKernelCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ if (psDevInfo->psKernelCCBMemDesc != NULL)
+ {
+ if (psDevInfo->psKernelCCB != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psKernelCCBMemDesc);
+ psDevInfo->psKernelCCB = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psKernelCCBMemDesc);
+ psDevInfo->psKernelCCBMemDesc = NULL;
+ }
+ if (psDevInfo->psKernelCCBCtlMemDesc != NULL)
+ {
+ if (psDevInfo->psKernelCCBCtl != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psKernelCCBCtlMemDesc);
+ psDevInfo->psKernelCCBCtl = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psKernelCCBCtlMemDesc);
+ psDevInfo->psKernelCCBCtlMemDesc = NULL;
+ }
+}
+
+/*!
+*******************************************************************************
+ @Function RGXSetupKernelCCB
+ @Description Allocate and initialise the kernel CCB
+ @Input psDevInfo
+ @Input psRGXFWInit
+ @Input ui32NumCmdsLog2
+ @Input ui32CmdSize
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupKernelCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit,
+ IMG_UINT32 ui32NumCmdsLog2,
+ IMG_UINT32 ui32CmdSize)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_CCB_CTL *psKCCBCtl;
+ DEVMEM_FLAGS_T uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+ IMG_UINT32 ui32kCCBSize = (1U << ui32NumCmdsLog2);
+
+
+ /*
+ * FIXME: the write offset need not be writeable by the firmware, indeed may
+ * not even be needed for reading. Consider moving it to its own data
+ * structure.
+ */
+ uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Allocation flags for Kernel CCB */
+ uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /*
+ * Allocate memory for the kernel CCB control.
+ */
+ PDUMPCOMMENT("Allocate memory for kernel CCB control");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_CCB_CTL),
+ uiCCBCtlMemAllocFlags,
+ "FwKernelCCBControl",
+ &psDevInfo->psKernelCCBCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB ctl (%u)", eError));
+ goto fail;
+ }
+
+ /*
+ * Allocate memory for the kernel CCB.
+ * (this will reference further command data in non-shared CCBs)
+ */
+ PDUMPCOMMENT("Allocate memory for kernel CCB");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32kCCBSize * ui32CmdSize,
+ uiCCBMemAllocFlags,
+ "FwKernelCCB",
+ &psDevInfo->psKernelCCBMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB (%u)", eError));
+ goto fail;
+ }
+
+ /*
+ * Map the kernel CCB control to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psKernelCCBCtlMemDesc,
+ (void **)&psDevInfo->psKernelCCBCtl);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB Ctl (%u)", eError));
+ goto fail;
+ }
+
+ /*
+ * Map the kernel CCB to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psKernelCCBMemDesc,
+ (void **)&psDevInfo->psKernelCCB);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB (%u)", eError));
+ goto fail;
+ }
+
+ /*
+ * Initialise the kernel CCB control.
+ */
+ psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ psKCCBCtl->ui32WriteOffset = 0;
+ psKCCBCtl->ui32ReadOffset = 0;
+ psKCCBCtl->ui32WrapMask = ui32kCCBSize - 1;
+ psKCCBCtl->ui32CmdSize = ui32CmdSize;
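+
+ /* Example (sketch): ui32NumCmdsLog2 == 7 gives a 128-entry kCCB with
+ * ui32WrapMask == 0x7F, so read/write offsets can wrap with a bitwise AND.
+ */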
+
+ /*
+ * Set-up RGXFWIfCtl pointers to access the kCCB
+ */
+ RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCBCtl,
+ psDevInfo->psKernelCCBCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCB,
+ psDevInfo->psKernelCCBMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ /*
+ * Pdump the kernel CCB control.
+ */
+ PDUMPCOMMENT("Initialise kernel CCB ctl");
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, 0, sizeof(RGXFWIF_CCB_CTL), 0);
+
+ return PVRSRV_OK;
+
+fail:
+ RGXFreeKernelCCB(psDevInfo);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function RGXFreeFirmwareCCB
+ @Description Free the firmware CCB
+ @Input psDevInfo
+ @Input ppsFirmwareCCBCtl
+ @Input ppsFirmwareCCBCtlMemDesc
+ @Input ppui8FirmwareCCB
+ @Input ppsFirmwareCCBMemDesc
+
+ @Return void
+******************************************************************************/
+static void RGXFreeFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_CCB_CTL **ppsFirmwareCCBCtl,
+ DEVMEM_MEMDESC **ppsFirmwareCCBCtlMemDesc,
+ IMG_UINT8 **ppui8FirmwareCCB,
+ DEVMEM_MEMDESC **ppsFirmwareCCBMemDesc)
+{
+ if (*ppsFirmwareCCBMemDesc != NULL)
+ {
+ if (*ppui8FirmwareCCB != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(*ppsFirmwareCCBMemDesc);
+ *ppui8FirmwareCCB = NULL;
+ }
+ DevmemFwFree(psDevInfo, *ppsFirmwareCCBMemDesc);
+ *ppsFirmwareCCBMemDesc = NULL;
+ }
+ if (*ppsFirmwareCCBCtlMemDesc != NULL)
+ {
+ if (*ppsFirmwareCCBCtl != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(*ppsFirmwareCCBCtlMemDesc);
+ *ppsFirmwareCCBCtl = NULL;
+ }
+ DevmemFwFree(psDevInfo, *ppsFirmwareCCBCtlMemDesc);
+ *ppsFirmwareCCBCtlMemDesc = NULL;
+ }
+}
+
+#define INPUT_STR_SIZE_MAX 13
+#define APPEND_STR_SIZE 7
+#define COMBINED_STR_LEN_MAX (INPUT_STR_SIZE_MAX + APPEND_STR_SIZE + 1)
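+
+/*
+ * Worked example of the string budget above: a CCB name such as "FwCCB"
+ * (at most INPUT_STR_SIZE_MAX characters) has "Control" (APPEND_STR_SIZE
+ * characters) appended to form the control structure name "FwCCBControl";
+ * the +1 leaves room for the terminating NUL, so 13 + 7 + 1 = 21 bytes.
+ */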
+
+/*!
+*******************************************************************************
+ @Function RGXSetupFirmwareCCB
+ @Description Allocate and initialise a Firmware CCB
+ @Input psDevInfo
+ @Input ppsFirmwareCCBCtl
+ @Input ppsFirmwareCCBCtlMemDesc
+ @Input ppui8FirmwareCCB
+ @Input ppsFirmwareCCBMemDesc
+ @Input psFirmwareCCBCtlFWAddr
+ @Input psFirmwareCCBFWAddr
+ @Input ui32NumCmdsLog2
+ @Input ui32CmdSize
+ @Input pszName Name string; its length must be less than
+ or equal to INPUT_STR_SIZE_MAX
+ @Return PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_CCB_CTL **ppsFirmwareCCBCtl,
+ DEVMEM_MEMDESC **ppsFirmwareCCBCtlMemDesc,
+ IMG_UINT8 **ppui8FirmwareCCB,
+ DEVMEM_MEMDESC **ppsFirmwareCCBMemDesc,
+ PRGXFWIF_CCB_CTL *psFirmwareCCBCtlFWAddr,
+ PRGXFWIF_CCB *psFirmwareCCBFWAddr,
+ IMG_UINT32 ui32NumCmdsLog2,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PCHAR pszName)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_CCB_CTL *psFWCCBCtl;
+ DEVMEM_FLAGS_T uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+ IMG_UINT32 ui32FWCCBSize = (1U << ui32NumCmdsLog2);
+ IMG_CHAR sCCBCtlName[COMBINED_STR_LEN_MAX] = "";
+ IMG_CHAR sAppend[] = "Control";
+
+ /*
+ * FIXME: the write offset need not be writeable by the host, indeed may
+ * not even be needed for reading. Consider moving it to its own data
+ * structure.
+ */
+ uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Allocation flags for Firmware CCB */
+ uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PVR_ASSERT(strlen(sCCBCtlName) == 0);
+ PVR_ASSERT(strlen(sAppend) == APPEND_STR_SIZE);
+ PVR_ASSERT(strlen(pszName) <= INPUT_STR_SIZE_MAX);
+
+ /* Append "Control" to the name for the control struct. */
+ strncat(sCCBCtlName, pszName, INPUT_STR_SIZE_MAX);
+ strncat(sCCBCtlName, sAppend, APPEND_STR_SIZE);
+
+ /*
+ Allocate memory for the Firmware CCB control.
+ */
+ PDUMPCOMMENT("Allocate memory for %s", sCCBCtlName);
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_CCB_CTL),
+ uiCCBCtlMemAllocFlags,
+ sCCBCtlName,
+ ppsFirmwareCCBCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate %s (%u)", sCCBCtlName, eError));
+ goto fail;
+ }
+
+ /*
+ Allocate memory for the Firmware CCB.
+ (this will reference further command data in non-shared CCBs)
+ */
+ PDUMPCOMMENT("Allocate memory for %s", pszName);
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32FWCCBSize * ui32CmdSize,
+ uiCCBMemAllocFlags,
+ pszName,
+ ppsFirmwareCCBMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate %s (%u)", pszName, eError));
+ goto fail;
+ }
+
+ /*
+ Map the Firmware CCB control to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(*ppsFirmwareCCBCtlMemDesc,
+ (void **)ppsFirmwareCCBCtl);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu %s (%u)", sCCBCtlName, eError));
+ goto fail;
+ }
+
+ /*
+ Map the firmware CCB to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(*ppsFirmwareCCBMemDesc,
+ (void **)ppui8FirmwareCCB);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu %s (%u)", pszName, eError));
+ goto fail;
+ }
+
+ /*
+ * Initialise the firmware CCB control.
+ */
+ psFWCCBCtl = *ppsFirmwareCCBCtl;
+ psFWCCBCtl->ui32WriteOffset = 0;
+ psFWCCBCtl->ui32ReadOffset = 0;
+ psFWCCBCtl->ui32WrapMask = ui32FWCCBSize - 1;
+ psFWCCBCtl->ui32CmdSize = ui32CmdSize;
+
+ /*
+ * Set up the RGXFWIfCtl pointers used to access the Firmware CCB
+ */
+ RGXSetFirmwareAddress(psFirmwareCCBCtlFWAddr,
+ *ppsFirmwareCCBCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ RGXSetFirmwareAddress(psFirmwareCCBFWAddr,
+ *ppsFirmwareCCBMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ /*
+ * PDump the Firmware CCB control.
+ */
+ PDUMPCOMMENT("Initialise %s", sCCBCtlName);
+ DevmemPDumpLoadMem(*ppsFirmwareCCBCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCB_CTL),
+ 0);
+
+ return PVRSRV_OK;
+
+fail:
+ RGXFreeFirmwareCCB(psDevInfo,
+ ppsFirmwareCCBCtl,
+ ppsFirmwareCCBCtlMemDesc,
+ ppui8FirmwareCCB,
+ ppsFirmwareCCBMemDesc);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PMR *psPMR;
+
+ if (psDevInfo->psRGXFaultAddressMemDesc)
+ {
+ if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR) == PVRSRV_OK)
+ {
+ PMRUnlockSysPhysAddresses(psPMR);
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+ }
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_INIT *psRGXFWInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ IMG_UINT32 *pui32MemoryVirtAddr;
+ IMG_UINT32 i;
+ size_t ui32PageSize;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PMR *psPMR;
+
+ ui32PageSize = OSGetPageSize();
+
+ /* Allocate page of memory to use for page faults on non-blocking memory transactions */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED;
+
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32PageSize,
+ ui32PageSize,
+ uiMemAllocFlags,
+ "FwExFaultAddress",
+ &psDevInfo->psRGXFaultAddressMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate mem for fault address (%u)",
+ eError));
+ goto failFaultAddressDescAlloc;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+ (void **)&pui32MemoryVirtAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFaultReadRegister: Failed to acquire mem for fault address (%u)",
+ eError));
+ goto failFaultAddressDescAqCpuVirt;
+ }
+
+ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+ {
+ *(pui32MemoryVirtAddr + i) = 0xDEADBEEF;
+ }
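+
+ /*
+ * The page is filled with the recognisable 0xDEADBEEF poison value so
+ * that data read back through the fault page is easy to spot in debug
+ * dumps (interpretation: any distinctive poison value would do).
+ */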
+
+ eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFaultReadRegister: Error getting PMR for fault address (%u)",
+ eError));
+
+ goto failFaultAddressDescGetPMR;
+ }
+ else
+ {
+ IMG_BOOL bValid;
+ IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFaultReadRegister: Error locking physical address for fault address MemDesc (%u)",
+ eError));
+
+ goto failFaultAddressDescLockPhys;
+ }
+
+ eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize,1,0,&(psRGXFWInit->sFaultPhysAddr),&bValid);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFaultReadRegister: Error getting physical address for fault address MemDesc (%u)",
+ eError));
+
+ goto failFaultAddressDescGetPhys;
+ }
+
+ if (!bValid)
+ {
+ psRGXFWInit->sFaultPhysAddr.uiAddr = 0;
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFaultReadRegister: Failed getting physical address for fault address MemDesc - invalid page (0x%llX)",
+ psRGXFWInit->sFaultPhysAddr.uiAddr));
+
+ goto failFaultAddressDescGetPhys;
+ }
+ }
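+
+ /*
+ * psRGXFWInit->sFaultPhysAddr now holds the device physical address of
+ * the poison page; per the allocation comment above, the firmware can
+ * direct faulting non-blocking memory transactions at this page.
+ */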
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+ return PVRSRV_OK;
+
+failFaultAddressDescGetPhys:
+ PMRUnlockSysPhysAddresses(psPMR);
+
+failFaultAddressDescLockPhys:
+
+failFaultAddressDescGetPMR:
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+failFaultAddressDescAqCpuVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+failFaultAddressDescAlloc:
+#endif
+ return eError;
+}
+
+static PVRSRV_ERROR RGXHwBrn37200(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ IMG_UINT64 ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+ IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize);
+
+ if(ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+ {
+ struct _DEVMEM_HEAP_ *psBRNHeap;
+ DEVMEM_FLAGS_T uiFlags;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ size_t uiPageSize;
+
+ uiPageSize = OSGetPageSize();
+
+ uiFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+ "HWBRN37200", /* FIXME: We need to create an IDENT macro for this string.
+ Make sure the IDENT macro is not accessible to userland */
+ &psBRNHeap);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: HWBRN37200 Failed DevmemFindHeapByName (%u)", eError));
+ goto failFWHWBRN37200FindHeapByName;
+ }
+
+ psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+ eError = DevmemAllocate(psBRNHeap,
+ uiPageSize,
+ ui32CacheLineSize,
+ uiFlags,
+ "HWBRN37200",
+ &psDevInfo->psRGXFWHWBRN37200MemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to allocate %u bytes for HWBRN37200 (%u)",
+ (IMG_UINT32)uiPageSize,
+ eError));
+ goto failFWHWBRN37200MemDescAlloc;
+ }
+
+ /*
+ Map the allocation to the device so that the heap for
+ this allocation is set.
+ */
+ eError = DevmemMapToDevice(psDevInfo->psRGXFWHWBRN37200MemDesc,
+ psBRNHeap,
+ &sTmpDevVAddr);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to map %u bytes for HWBRN37200 to device (%u)",
+ (IMG_UINT32)uiPageSize,
+ eError));
+ goto failFWHWBRN37200DevmemMapToDevice;
+ }
+
+
+
+ return PVRSRV_OK;
+
+ failFWHWBRN37200DevmemMapToDevice:
+
+ failFWHWBRN37200MemDescAlloc:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWHWBRN37200MemDesc);
+ psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+
+ failFWHWBRN37200FindHeapByName:;
+ }
+#endif
+ return eError;
+}
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+/*************************************************************************/ /*!
+@Function RGXTraceBufferIsInitRequired
+
+@Description Returns true if the firmware trace buffer is not yet allocated
+ and might be required by the firmware soon. The trace buffer is
+ allocated on demand to reduce the RAM footprint on systems that
+ do not need firmware trace.
+
+@Input psDevInfo RGX device info
+
+@Return IMG_BOOL Whether on-demand allocations are needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* The firmware expects a trace buffer only when:
+ * - Logtype is "trace" AND
+ * - at least one LogGroup is configured
+ */
+ if((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+ {
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTraceBufferInitOnDemandResources
+
+@Description Allocates the firmware trace buffer required for dumping trace
+ info from the firmware.
+
+@Input psDevInfo RGX device info
+
+@Return PVRSRV_OK on success, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32FwThreadNum;
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+ {
+ /* Ensure allocation API is only called when not already allocated */
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+
+ PDUMPCOMMENT("Allocate rgxfw trace buffer(%u)", ui32FwThreadNum);
+ eError = DevmemFwAllocate(psDevInfo,
+ RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+ uiMemAllocFlags,
+ "FwTraceBuffer",
+ &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %zu bytes for fw trace buffer %u (Error code:%u)",
+ __FUNCTION__,
+ RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+ ui32FwThreadNum,
+ eError));
+ goto fail;
+ }
+
+ /* Firmware address should not be already set */
+ PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+
+ /* Set the address the FW will use when dumping into the log (trace) buffer */
+ RGXSetFirmwareAddress(&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+ psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+ 0, RFW_FWADDR_NOREF_FLAG);
+ /* Set an address for the host to be able to read fw trace buffer */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+ (void **)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tracebuf (%u) ctl (Error code: %u)",
+ __FUNCTION__, ui32FwThreadNum, eError));
+ goto fail;
+ }
+ }
+
+/* Just return the error in case of failure; clean-up is handled by the DeInit function */
+fail:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTraceBufferDeinit
+
+@Description Deinitialises all the allocations and references that are made
+ for the FW trace buffer(s)
+
+@Input psDevInfo RGX device info
+@Return void
+*/ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+ {
+ if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+ psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+ psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+ }
+ }
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXSetupFirmware
+
+ @Description
+
+ Sets up all the firmware-related data
+
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ RGXFWIF_BIFTILINGMODE eBifTilingMode,
+ IMG_UINT32 ui32NumTilingCfgs,
+ IMG_UINT32 *pui32BIFTilingXStrides,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGXFWIF_DEV_VIRTADDR *psRGXFWInitFWAddr,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf)
+
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 dm, ui32Temp = 0;
+ IMG_UINT64 ui64ErnsBrns;
+#if defined (SUPPORT_PDVFS)
+ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+#endif
+ ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+ /* Fw init data */
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+ /* FIXME: Change to Cached */
+
+
+ PDUMPCOMMENT("Allocate RGXFWIF_INIT structure");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_INIT),
+ uiMemAllocFlags,
+ "FwInitStructure",
+ &psDevInfo->psRGXFWIfInitMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw if ctl (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_INIT),
+ eError));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psDevInfo->sFWInitFWAddr,
+ psDevInfo->psRGXFWIfInitMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+ *psRGXFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * Guest drivers do not support the following functionality:
+ * - Perform actual on-chip fw loading & initialisation
+ * - Perform actual on-chip fw management (i.e. reset)
+ * - Perform actual on-chip fw HWPerf,Trace,Utils,ActivePM
+ */
+#else
+ /* FW trace control structure */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw trace control structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_TRACEBUF),
+ uiMemAllocFlags,
+ "FwTraceCtlStruct",
+ &psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw trace (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_TRACEBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sTraceBufCtl,
+ psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfTraceBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Set initial firmware log type/group(s) */
+ if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Invalid initial log type (0x%X)",ui32LogType));
+ goto fail;
+ }
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32LogType;
+
+#if defined (PDUMP)
+ /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource
+ * (irrespective of loggroup(s) enabled), given that logtype/loggroups can
+ * be set during PDump playback in logconfig, at any point of time */
+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+#else
+ /* Otherwise, allocate only if required */
+ if (RGXTraceBufferIsInitRequired(psDevInfo))
+ {
+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+ }
+ else
+ {
+ eError = PVRSRV_OK;
+ }
+#endif
+ PVR_LOGG_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) && \
+ (0 == (ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+ {
+ IMG_BOOL bMetaDMA = psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ if (bMetaDMA)
+ {
+ IMG_UINT64 ui64SecBufHandle;
+
+ PDUMPCOMMENT("Import secure buffer to store FW coremem data");
+ eError = DevmemImportTDSecureBuf(psDeviceNode,
+ RGX_META_COREMEM_BSS_SIZE,
+ OSGetPageShift(),
+ uiMemAllocFlags,
+ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ &ui64SecBufHandle);
+ }
+ else
+#endif
+ {
+ PDUMPCOMMENT("Allocate buffer to store FW coremem data");
+ eError = DevmemFwAllocate(psDevInfo,
+ RGX_META_COREMEM_BSS_SIZE,
+ uiMemAllocFlags,
+ "FwCorememDataStore",
+ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate coremem data store (%u)",
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sCorememDataStore.pbyFWAddr,
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ if (bMetaDMA)
+ {
+ RGXSetMetaDMAAddress(&psRGXFWInit->sCorememDataStore,
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ &psRGXFWInit->sCorememDataStore.pbyFWAddr,
+ 0);
+ }
+ }
+
+ /* init HW frame info */
+ PDUMPCOMMENT("Allocate rgxfw HW info buffer");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_HWRINFOBUF),
+ uiMemAllocFlags,
+ "FwHWInfoBuffer",
+ &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for HW info (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sRGXFWIfHWRInfoBufCtl,
+ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfHWRInfoBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel HWR info buffer (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Might be uncached. Be conservative and use a DeviceMemSet */
+ OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBuf, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+ /* Allocate shared buffer for GPU utilisation */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate shared buffer for GPU utilisation");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_GPU_UTIL_FWCB),
+ uiMemAllocFlags,
+ "FwGPUUtilisationBuffer",
+ &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for GPU utilisation buffer ctl (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_GPU_UTIL_FWCB),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sGpuUtilFWCbCtl,
+ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfGpuUtilFWCb);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel GPU utilisation buffer ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Initialise GPU utilisation buffer */
+ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+ RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
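+
+ /*
+ * RGXFWIF_GPU_UTIL_MAKE_WORD presumably packs the current OS timestamp
+ * and the utilisation state into a single 64-bit word, so the buffer
+ * starts out recording "idle since now".
+ */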
+
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw FW runtime configuration (FW)");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_RUNTIME_CFG),
+ uiMemAllocFlags,
+ "FwRuntimeCfg",
+ &psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for FW runtime configuration (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_RUNTIME_CFG),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sRuntimeCfg,
+ psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ (void **)&psDevInfo->psRGXFWIfRuntimeCfg);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel FW runtime configuration (%u)",
+ eError));
+ goto fail;
+ }
+
+
+ /* HWPerf: Determine the size of the FW buffer */
+ if (ui32HWPerfFWBufSizeKB == 0 ||
+ ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+ {
+ /* Under pvrsrvctl a size of 0 implies the AppHint is not set or is set
+ * to zero, so use the default size from the driver constant. Under
+ * SUPPORT_KERNEL_SRVINIT the default is the above macro. In either
+ * case, set it to the default size, with no logging.
+ */
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+ }
+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+ {
+ /* Size specified as an AppHint but it is too big */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+ }
+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+ {
+ /* Size specified in the AppHint HWPerfFWBufSizeInKB */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: Using HWPerf FW buffer size of %u KB",
+ ui32HWPerfFWBufSizeKB));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = ui32HWPerfFWBufSizeKB<<10;
+ }
+ else
+ {
+ /* Size specified as an AppHint but it is too small */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+ }
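+
+ /*
+ * In effect the chain above clamps the AppHint value and converts KB to
+ * bytes, roughly equivalent to (illustrative only):
+ *   psDevInfo->ui32RGXFWIfHWPerfBufSize =
+ *       MIN(MAX(ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN),
+ *           RGXFW_HWPERF_L1_SIZE_MAX) << 10;
+ * with 0 or the default value mapping straight to the default size.
+ */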
+
+ /* init HWPERF data */
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfRIdx = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWIdx = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWrapCount = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+ psRGXFWInit->bDisableFilterHWPerfCustomCounter = (ui32ConfigFlags & RGXFWIF_INICFG_HWP_DISABLE_FILTER) ? IMG_TRUE : IMG_FALSE;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfUt = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfDropCount = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32FirstDropOrdinal = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32LastDropOrdinal = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEnergy = 0;
+
+ /* Second stage initialisation of HWPerf; hHWPerfLock was created in the
+ * first stage. See the RGXRegisterDevice() call to RGXHWPerfInit(). */
+ if (psDevInfo->ui64HWPerfFilter == 0)
+ {
+ psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+ psRGXFWInit->ui64HWPerfFilter = ui64HWPerfFilter;
+ }
+ else
+ {
+ /* The filter has already been modified. This can happen if the driver
+ * was compiled with SUPPORT_KERNEL_SRVINIT enabled and e.g.
+ * pvr/gpu_tracing_on was enabled. */
+ psRGXFWInit->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+ }
+
+#if defined (PDUMP)
+ /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+ * (irrespective of HWPerf enabled or not), given that HWPerf can be
+ * enabled during PDump playback via RTCONF at any point of time. */
+ eError = RGXHWPerfInitOnDemandResources();
+#else
+ /* Otherwise, only allocate if HWPerf is enabled via apphint */
+ if (ui32ConfigFlags & RGXFWIF_INICFG_HWPERF_EN)
+ {
+ eError = RGXHWPerfInitOnDemandResources();
+ }
+#endif
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+
+ RGXHWPerfInitAppHintCallbacks(psDeviceNode);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PDUMPCOMMENT("Allocate rgxfw register configuration structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_REG_CFG),
+ uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ "FwRegisterConfigStructure",
+ &psDevInfo->psRGXFWIfRegCfgMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw register configurations (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_REG_CFG),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sRegCfg,
+ psDevInfo->psRGXFWIfRegCfgMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw hwperfctl structure");
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32HWPerfCountersDataSize,
+ OSGetPageSize(),
+ uiMemAllocFlags,
+ "FwExHWPerfControlStructure",
+ &psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw hwperf control (%u)",
+ ui32HWPerfCountersDataSize,
+ eError));
+ goto fail;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void**) ppsHWPerfPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto fail;
+ }
+
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sHWPerfCtl,
+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ 0, 0);
+
+ /* Required info by FW to calculate the ActivePM idle timer latency */
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+ psRGXFWInit->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+ psRGXFWInit->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+ /* Initialise variable runtime configuration to the system defaults */
+ psRuntimeCfg->ui32CoreClockSpeed = psRGXFWInit->ui32InitialCoreClockSpeed;
+ psRuntimeCfg->ui32ActivePMLatencyms = psRGXFWInit->ui32ActivePMLatencyms;
+ psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+
+ /* Initialize the DefaultDustsNumInit Field to Max Dusts */
+ psRuntimeCfg->ui32DefaultDustsNumInit = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters/2));
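+
+ /* A "dust" appears to group a pair of shader clusters, hence dividing
+ * ui32NumClusters by two, with a floor of one so the initial dust count
+ * is never zero on small configurations.
+ */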
+ }
+#if defined(PDUMP)
+ PDUMPCOMMENT("Dump initial state of FW runtime configuration");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ 0,
+ sizeof(RGXFWIF_RUNTIME_CFG),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+#endif /* defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+ /* Allocate a sync for power management */
+ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+ &psDevInfo->hSyncPrimContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive context with error (%u)", eError));
+ goto fail;
+ }
+
+ eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive with error (%u)", eError));
+ goto fail;
+ }
+
+ eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim,
+ &psRGXFWInit->sPowerSync.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get Sync Prim FW address with error (%u)",
+ __FUNCTION__, eError));
+ goto fail;
+ }
+
+ /* Setup Fault read register */
+ eError = RGXSetupFaultReadRegister(psDeviceNode, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup fault read register"));
+ goto fail;
+ }
+
+ /* Apply FIX_HW_BRN_37200 */
+ eError = RGXHwBrn37200(psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to apply HWBRN37200"));
+ goto fail;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+ {
+ ui32Temp = RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_ONLY;
+ }else
+#endif
+ {
+ ui32Temp = RGXFWIF_KCCB_NUMCMDS_LOG2_FEAT_GPU_VIRTUALISATION;
+ }
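+
+ /*
+ * ui32Temp is a log2 command count: RGXSetupKernelCCB below allocates
+ * (1U << ui32Temp) * sizeof(RGXFWIF_KCCB_CMD) bytes for the ring, so a
+ * value of e.g. 5 would give a 32-entry kernel CCB (illustrative).
+ */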
+ /*
+ * Set up kernel CCB.
+ */
+ eError = RGXSetupKernelCCB(psDevInfo,
+ psRGXFWInit,
+ ui32Temp,
+ sizeof(RGXFWIF_KCCB_CMD));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Kernel CCB"));
+ goto fail;
+ }
+
+ /*
+ * Set up firmware CCB.
+ */
+ eError = RGXSetupFirmwareCCB(psDevInfo,
+ &psDevInfo->psFirmwareCCBCtl,
+ &psDevInfo->psFirmwareCCBCtlMemDesc,
+ &psDevInfo->psFirmwareCCB,
+ &psDevInfo->psFirmwareCCBMemDesc,
+ &psRGXFWInit->psFirmwareCCBCtl,
+ &psRGXFWInit->psFirmwareCCB,
+ RGXFWIF_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_FWCCB_CMD),
+ "FwCCB");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware CCB"));
+ goto fail;
+ }
+ /* RD Power Island */
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+ IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+ (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+ ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST_V2;
+
+#if defined(SUPPORT_PDVFS)
+ /* Proactive DVFS depends on Workload Estimation */
+ psPDVFSOPPInfo = &(psRGXFWInit->sPDVFSOPPInfo);
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+ if(psDVFSDeviceCfg->pasOPPTable != NULL)
+ {
+ if(psDVFSDeviceCfg->ui32OPPTableSize >
+ sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXSetupFirmware: OPP Table too large :"
+ " Size = %u, Maximum size = %lu",
+ psDVFSDeviceCfg->ui32OPPTableSize,
+ (unsigned long)(sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail;
+ }
+
+ memcpy(psPDVFSOPPInfo->asOPPValues,
+ psDVFSDeviceCfg->pasOPPTable,
+ sizeof(psPDVFSOPPInfo->asOPPValues));
+ psPDVFSOPPInfo->ui32MaxOPPPoint =
+ (psDVFSDeviceCfg->ui32OPPTableSize) - 1;
+
+ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS_V2;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Missing OPP Table"));
+ }
+#endif
+#endif
+
+ psRGXFWInit->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /*
+ * Set up Workload Estimation firmware CCB.
+ */
+ eError = RGXSetupFirmwareCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+ &psRGXFWInit->psWorkEstFirmwareCCBCtl,
+ &psRGXFWInit->psWorkEstFirmwareCCB,
+ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+ "FwWEstCCB");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Workload Estimation Firmware CCB"));
+ goto fail;
+ }
+#endif
+
+ /* Require a minimum amount of memory for the signature buffers */
+ if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+ {
+ ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+ }
+
+ /* Setup Signature and Checksum Buffers for TA and 3D */
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigTAChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_TA],
+ "TA");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup TA signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSig3DChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_3D],
+ "3D");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup 3D signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigRTChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_RTU],
+ "RTU");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup RTU signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigRTChecksSize = ui32SignatureChecksBufSize;
+
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigSHChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_SHG],
+ "SHG");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup SHG signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigSHChecksSize = ui32SignatureChecksBufSize;
+ }
+
+#if defined(RGXFW_ALIGNCHECKS)
+ eError = RGXFWSetupAlignChecks(psDevInfo,
+ &psRGXFWInit->sAlignChecks,
+ pui32RGXFWAlignChecks,
+ ui32RGXFWAlignChecksArrLength);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup alignment checks"));
+ goto fail;
+ }
+#endif
+
+ psRGXFWInit->ui32FilterFlags = ui32FilterFlags;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * Guest drivers do not support the following functionality:
+ * - Perform actual on-chip fw RDPowIsland(ing)
+ * - Perform actual on-chip fw tracing
+ * - Configure FW perf counters
+ */
+ PVR_UNREFERENCED_PARAMETER(dm);
+ PVR_UNREFERENCED_PARAMETER(eFirmwarePerf);
+#else
+
+ if(ui64ErnsBrns & FIX_HW_BRN_52402_BIT_MASK)
+ {
+ /* Fill in the remaining bits of the fw init data */
+ psRGXFWInit->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_52402_HEAP_BASE;
+ psRGXFWInit->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_52402_HEAP_BASE;
+ }else
+ {
+ /* Fill in the remaining bits of the fw init data */
+ psRGXFWInit->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+ psRGXFWInit->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+ }
+
+ psRGXFWInit->sDPXControlStreamBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+ psRGXFWInit->sResultDumpBase.uiAddr = RGX_DOPPLER_OVERFLOW_HEAP_BASE;
+ psRGXFWInit->sRTUHeapBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+ psRGXFWInit->sTDMTPUYUVCeoffsHeapBase.uiAddr = RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE;
+
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ psRGXFWInit->ui32JonesDisableMask = ui32JonesDisableMask;
+ }
+ psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_SRVCFG_DISABLE_PDP_EN)
+ ? IMG_FALSE : IMG_TRUE;
+ psRGXFWInit->ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+ psRGXFWInit->eFirmwarePerf = eFirmwarePerf;
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+ {
+ eError = _AllocateSLC3Fence(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate memory for SLC3Fence"));
+ goto fail;
+ }
+ }
+
+
+ if ( (psDevInfo->sDevFeatureCfg.ui32META) && \
+ ((ui32ConfigFlags & RGXFWIF_INICFG_METAT1_ENABLED) != 0))
+ {
+ /* Allocate a page for T1 stack */
+ eError = DevmemFwAllocate(psDevInfo,
+ RGX_META_STACK_SIZE,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwMETAT1Stack",
+ & psDevInfo->psMETAT1StackMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate T1 Stack"));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sT1Stack,
+ psDevInfo->psMETAT1StackMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: T1 Stack Frame allocated at %x",
+ psRGXFWInit->sT1Stack.ui32Addr));
+ }
+
+#if defined(SUPPORT_PDVFS)
+ /* Core clock rate */
+ uiMemAllocFlags =
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT32),
+ uiMemAllocFlags,
+ "FwCoreClkRate",
+ &psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate PDVFS core clock rate"));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sCoreClockRate,
+ psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: PDVFS core clock rate allocated at %x",
+ psRGXFWInit->sCoreClockRate.ui32Addr));
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+ (void **)&psDevInfo->pui32RGXFWIFCoreClkRate);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire core clk rate (%u)",
+ eError));
+ goto fail;
+ }
+#endif
+
+ /* Timestamps */
+ uiMemAllocFlags =
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /*
+ the timer query arrays
+ */
+ PDUMPCOMMENT("Allocate timer query arrays (FW)");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwStartTimesArray",
+ & psDevInfo->psStartTimeMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate start times array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+ (void **)& psDevInfo->pui64StartTimeById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map start times array"));
+ goto fail;
+ }
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwEndTimesArray",
+ & psDevInfo->psEndTimeMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate end times array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+ (void **)& psDevInfo->pui64EndTimeById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map end times array"));
+ goto fail;
+ }
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwCompletedOpsArray",
+ & psDevInfo->psCompletedMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate completed ops array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+ (void **)& psDevInfo->pui32CompletedById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map completed ops array"));
+ goto fail;
+ }
+
+ /* Initialize FW started flag */
+ psRGXFWInit->bFirmwareStarted = IMG_FALSE;
+ psRGXFWInit->ui32MarkerVal = 1;
+
+ /* Initialise the compatibility check data */
+ RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+ RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInit->sRGXCompChecks.sHWBVNC);
+
+ PDUMPCOMMENT("Dump RGXFW Init data");
+ if (!bEnableSignatureChecks)
+ {
+#if defined(PDUMP)
+ PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, asSigBufCtl),
+ sizeof(RGXFWIF_SIGBUF_CTL)*(psDevInfo->sDevFeatureCfg.ui32MAXDMCount),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+ psRGXFWInit->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+ psRGXFWInit->asSigBufCtl[RGXFWIF_DM_TA].sBuffer.ui32Addr = 0x0;
+ }
+
+ for (dm = 0; dm < (psDevInfo->sDevFeatureCfg.ui32MAXDMCount); dm++)
+ {
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmLockedUpCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmOverranCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmRecoveredCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmFalseDetectCount[dm] = 0;
+ }
+
+ /*
+ * BIF Tiling configuration
+ */
+
+ psRGXFWInit->eBifTilingMode = eBifTilingMode;
+
+ psRGXFWInit->sBifTilingCfg[0].uiBase = RGX_BIF_TILING_HEAP_1_BASE;
+ psRGXFWInit->sBifTilingCfg[0].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInit->sBifTilingCfg[0].uiXStride = pui32BIFTilingXStrides[0];
+ psRGXFWInit->sBifTilingCfg[1].uiBase = RGX_BIF_TILING_HEAP_2_BASE;
+ psRGXFWInit->sBifTilingCfg[1].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInit->sBifTilingCfg[1].uiXStride = pui32BIFTilingXStrides[1];
+ psRGXFWInit->sBifTilingCfg[2].uiBase = RGX_BIF_TILING_HEAP_3_BASE;
+ psRGXFWInit->sBifTilingCfg[2].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInit->sBifTilingCfg[2].uiXStride = pui32BIFTilingXStrides[2];
+ psRGXFWInit->sBifTilingCfg[3].uiBase = RGX_BIF_TILING_HEAP_4_BASE;
+ psRGXFWInit->sBifTilingCfg[3].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInit->sBifTilingCfg[3].uiXStride = pui32BIFTilingXStrides[3];
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+ DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ 0,
+ ui32HWPerfCountersDataSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("Dump rgxfw trace control structure");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_TRACEBUF),
+ PDUMP_FLAGS_CONTINUOUS);
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfRegCfgMemDesc,
+ 0,
+ sizeof(RGXFWIF_REG_CFG),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+ PDUMPCOMMENT("Dump rgxfw init structure");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfInitMemDesc,
+ 0,
+ sizeof(RGXFWIF_INIT),
+ PDUMP_FLAGS_CONTINUOUS);
+ if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) && \
+ (0 == (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+ {
+ PDUMPCOMMENT("Dump rgxfw coremem data store");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ 0,
+ RGX_META_COREMEM_BSS_SIZE,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ PDUMPCOMMENT("RTCONF: run-time configuration");
+
+
+ /* Dump the config options so they can be edited.
+ *
+ * FIXME: Need new DevmemPDumpWRW API which writes a WRW to load ui32ConfigFlags
+ */
+ PDUMPCOMMENT("(Set the FW config options here)");
+ PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_TA_EN);
+ PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_3D_EN);
+ PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_CDM_EN);
+ PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+ PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+ PDUMPCOMMENT("( Reserved (do not set): 0x%08x)", RGXFWIF_INICFG_RSVD);
+ PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+ PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+ PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
+ PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+ PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+ PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK)
+ {
+ PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
+ PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE);
+ PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
+ }
+
+ PDUMPCOMMENT("( Enable SHG Bypass mode: 0x%08x)", RGXFWIF_INICFG_SHG_BYPASS_EN);
+ PDUMPCOMMENT("( Enable RTU Bypass mode: 0x%08x)", RGXFWIF_INICFG_RTU_BYPASS_EN);
+ PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+ PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+ PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+ PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+ PDUMPCOMMENT("( Enable CDM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN);
+ PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+ PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+ PDUMPCOMMENT("( Enable Meta T1 running main code: 0x%08x)", RGXFWIF_INICFG_METAT1_MAIN);
+ PDUMPCOMMENT("( Enable Meta T1 running dummy code: 0x%08x)", RGXFWIF_INICFG_METAT1_DUMMY);
+ PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, ui32ConfigFlags),
+ psRGXFWInit->ui32ConfigFlags,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* default: no filter */
+ psRGXFWInit->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+ psRGXFWInit->sPIDFilter.asItems[0].uiPID = 0;
+
+ PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sPIDFilter.eMode),
+ psRGXFWInit->sPIDFilter.eMode,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+ RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+ {
+ IMG_UINT32 i;
+
+ /* generate a few WRWs in the pdump stream as an example */
+ for(i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+ {
+ /*
+ * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of
+ * a non-const variable in the expression, which it needs to be const. Typical compiler output is
+ * "expression must have a constant value".
+ */
+ const IMG_DEVMEM_OFFSET_T uiPIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+ const IMG_DEVMEM_OFFSET_T uiOSIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+
+ PDUMPCOMMENT("(PID and OSID pair %u)", i);
+
+ PDUMPCOMMENT("(PID)");
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ uiPIDOff,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("(OSID)");
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ uiOSIDOff,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+
+ /*
+ * Dump the log config so it can be edited.
+ */
+ PDUMPCOMMENT("(Set the log config here)");
+ PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+ PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+ PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+ PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+ PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+ PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+ PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+ PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+ PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+ PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+ PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+ PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ PDUMPCOMMENT("( RPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RPM);
+ }
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK)
+ {
+ PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+ }
+ PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("Set the HWPerf Filter config here");
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, ui64HWPerfFilter),
+ psRGXFWInit->ui64HWPerfFilter,
+ PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PDUMPCOMMENT("(Number of registers configurations at pow on, dust change, ta, 3d, cdm and tla/tdm)");
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_PWR_ON]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_DUST_CHANGE]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_TA]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_3D]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_CDM]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK)
+ {
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_TLA]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ {
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[RGXFWIF_REG_CFG_TYPE_TDM]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ PDUMPCOMMENT("(Set registers here: address, mask, value)");
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+#endif /* PDUMP */
+#endif /* PVRSRV_GPUVIRT_GUESTDRV */
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Perform additional virtualisation initialisation */
+ eError = RGXVzSetupFirmware(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed RGXVzSetupFirmware"));
+ goto fail;
+ }
+#endif
+
+ /* We don't need access to the fw init data structure anymore */
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ psRGXFWInit = NULL;
+
+ psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+ return PVRSRV_OK;
+
+fail:
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ if (psDevInfo->psRGXFWIfInitMemDesc != NULL && psRGXFWInit != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ }
+#endif
+ RGXFreeFirmware(psDevInfo);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXFreeFirmware
+
+ @Description
+
+ Frees all the firmware-related allocations
+
+ @Input psDevInfo
+
+ @Return void
+
+******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT64 ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
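+ /* Mark the firmware interface as uninitialised before tearing down its allocations */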
+ psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+ RGXFreeKernelCCB(psDevInfo);
+
+ RGXFreeFirmwareCCB(psDevInfo,
+ &psDevInfo->psFirmwareCCBCtl,
+ &psDevInfo->psFirmwareCCBCtlMemDesc,
+ &psDevInfo->psFirmwareCCB,
+ &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFreeFirmwareCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+ if (psDevInfo->psRGXFWAlignChecksMemDesc)
+ {
+ RGXFWFreeAlignChecks(psDevInfo);
+ }
+#endif
+
+ if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+ psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+ psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (psDevInfo->psRGXFWSigRTChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigRTChecksMemDesc);
+ psDevInfo->psRGXFWSigRTChecksMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWSigSHChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigSHChecksMemDesc);
+ psDevInfo->psRGXFWSigSHChecksMemDesc = NULL;
+ }
+ }
+
+ if(ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+ {
+ if (psDevInfo->psRGXFWHWBRN37200MemDesc)
+ {
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWHWBRN37200MemDesc);
+ DevmemFree(psDevInfo->psRGXFWHWBRN37200MemDesc);
+ psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+ }
+ }
+
+ RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+ if (psDevInfo->psPowSyncPrim != NULL)
+ {
+ SyncPrimFree(psDevInfo->psPowSyncPrim);
+ psDevInfo->psPowSyncPrim = NULL;
+ }
+
+ if (psDevInfo->hSyncPrimContext != 0)
+ {
+ SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+ psDevInfo->hSyncPrimContext = 0;
+ }
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+ psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+ }
+
+ RGXHWPerfDeinit();
+
+ if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+ psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+ psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfHWRInfoBuf != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfHWRInfoBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+ }
+
+ if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) &&
+     (0 == (ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+ {
+ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+ }
+ }
+
+ if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ /* first deinit/free the tracebuffer allocation */
+ RGXTraceBufferDeinit(psDevInfo);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfTraceBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+ }
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+ psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+ }
+#endif
+ if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+ }
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+ {
+ _FreeSLC3Fence(psDevInfo);
+ }
+
+ if( (psDevInfo->sDevFeatureCfg.ui32META) && (psDevInfo->psMETAT1StackMemDesc))
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psMETAT1StackMemDesc);
+ psDevInfo->psMETAT1StackMemDesc = NULL;
+ }
+
+#if defined(SUPPORT_PDVFS)
+ if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+ {
+ if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+ }
+#endif
+
+ if (psDevInfo->psRGXFWIfInitMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfInitMemDesc);
+ psDevInfo->psRGXFWIfInitMemDesc = NULL;
+ }
+#endif
+
+ if (psDevInfo->psCompletedMemDesc)
+ {
+ if (psDevInfo->pui32CompletedById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+ psDevInfo->pui32CompletedById = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psCompletedMemDesc);
+ psDevInfo->psCompletedMemDesc = NULL;
+ }
+ if (psDevInfo->psEndTimeMemDesc)
+ {
+ if (psDevInfo->pui64EndTimeById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+ psDevInfo->pui64EndTimeById = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psEndTimeMemDesc);
+ psDevInfo->psEndTimeMemDesc = NULL;
+ }
+ if (psDevInfo->psStartTimeMemDesc)
+ {
+ if (psDevInfo->pui64StartTimeById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+ psDevInfo->pui64StartTimeById = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psStartTimeMemDesc);
+ psDevInfo->psStartTimeMemDesc = NULL;
+ }
+}
+
+
+/******************************************************************************
+ FUNCTION : RGXAcquireKernelCCBSlot
+
+ PURPOSE : Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS : psKCCBCtrlMemDesc - memory descriptor of the kernel CCB control structure
+            : psKCCBCtl - the kernel CCB control structure
+            : pui32Offset - on success, receives the write offset of the acquired slot
+
+ RETURNS : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+ RGXFWIF_CCB_CTL *psKCCBCtl,
+ IMG_UINT32 *pui32Offset)
+{
+ IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;
+
+ ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+ ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+ /* Note: The MTS can queue up to 255 kicks (254 pending kicks and 1 executing kick)
+ * Hence the kernel CCB should not queue more than 254 commands
+ */
+ PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
+ DevmemPDumpCBP(psKCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+ ui32NextWriteOffset,
+ 1,
+ (psKCCBCtl->ui32WrapMask + 1));
+#endif
+
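+ /* Poll the read offset until the firmware consumes a command and a slot becomes free, or the timeout expires */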
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+
+ if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+ {
+ *pui32Offset = ui32NextWriteOffset;
+ return PVRSRV_OK;
+ }
+ {
+ /*
+ * The following sanity check doesn't impact performance,
+ * since the CPU has to wait for the GPU anyway (full kernel CCB).
+ */
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+ }
+ }
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Time out on waiting for CCB space */
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+ /* Ensure Rogue is powered up before kicking MTS */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to acquire powerlock (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to transition Rogue to ON (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ eError = RGXSendCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandWithPowLock: failed to schedule command (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+#if defined(DEBUG)
+ /* PVRSRVDebugRequest must be called without powerlock */
+ PVRSRVPowerUnlock(psDeviceNode);
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ goto _PVRSRVPowerLock_Exit;
+#endif
+ }
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+ return eError;
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 uiPdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB;
+ IMG_UINT32 ui32NewWriteOffset;
+ IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+#else
+ IMG_BOOL bIsInCaptureRange;
+ IMG_BOOL bPdumpEnabled;
+ IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS();
+
+ PDumpIsCaptureFrameKM(&bIsInCaptureRange);
+ bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans;
+
+ /* in capture range */
+ if (bPdumpEnabled)
+ {
+ if (!psDevInfo->bDumpedKCCBCtlAlready)
+ {
+ /* entering capture range */
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+ /* wait for firmware to catch up */
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSendCommandRaw: waiting on fw to catch-up, roff: %d, woff: %d",
+ psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+ PVRSRVPollForValueKM(&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF);
+
+ /* Dump Init state of Kernel CCB control (read and write offset) */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d",
+ psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCB_CTL),
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+#endif
+
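+ /* Stamp the command with the data master it is targeted at before queuing it */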
+ psKCCBCmd->eDM = eKCCBType;
+
+ PVR_ASSERT(ui32CmdSize == psKCCBCtl->ui32CmdSize);
+ if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw called without power lock held!"));
+ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+ }
+
+ /*
+ * Acquire a slot in the CCB.
+ */
+ eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw failed to acquire CCB slot. Type:%u Error:%u",
+ eKCCBType, eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+
+ /*
+ * Copy the command into the CCB.
+ */
+ OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+ psKCCBCmd, psKCCBCtl->ui32CmdSize);
+
+ /* ensure kCCB data is written before the offsets */
+ OSWriteMemoryBarrier();
+
+ /* Move past the current command */
+ psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+
+
+#if defined(PDUMP)
+ /* in capture range */
+ if (bPdumpEnabled)
+ {
+ /* Dump new Kernel CCB content */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd for DM %d, woff = %d",
+ eKCCBType,
+ ui32OldWriteOffset);
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+ ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+ psKCCBCtl->ui32CmdSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Dump new kernel CCB write offset */
+ PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff (added new cmd for DM %d): %d",
+ eKCCBType,
+ ui32NewWriteOffset);
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+ sizeof(IMG_UINT32),
+ uiPdumpFlags);
+ }
+
+ /* out of capture range */
+ if (!bPdumpEnabled)
+ {
+ eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandRaw: problem draining kCCB (%d)", eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+ }
+#endif
+
+
+ PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB");
+ /*
+ * Kick the MTS to schedule the firmware.
+ */
+ {
+ IMG_UINT32 ui32MTSRegVal;
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+ {
+ ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_GPUVIRT_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+ } else
+#endif
+ {
+ ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+ }
+
+
+ __MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+
+ PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, ui32MTSRegVal, uiPdumpFlags);
+ }
+
+#if defined (NO_HARDWARE)
+ /* keep the roff updated because fw isn't there to update it */
+ psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+_RGXSendCommandRaw_Exit:
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 uiPdumpFlags)
+{
+
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DLLIST_NODE *psNode, *psNext;
+ RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+
+ /* Check if there is any deferred KCCB command before sending the command passed as argument */
+ dllist_foreach_node(&psDevInfo->sKCCBDeferredCommandsListHead, psNode, psNext)
+ {
+ psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+ /* For every deferred KCCB command, try to send it*/
+ eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+ psTempDeferredKCCBCmd->eDM,
+ &(psTempDeferredKCCBCmd->sKCCBcmd),
+ sizeof(psTempDeferredKCCBCmd->sKCCBcmd),
+ psTempDeferredKCCBCmd->uiPdumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto _exit;
+ }
+ /* Remove from the deferred list the sent deferred KCCB command */
+ dllist_remove_node(psNode);
+ OSFreeMem(psTempDeferredKCCBCmd);
+ }
+
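+ /* Any deferred commands have been flushed; now send the command passed by the caller */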
+ eError = RGXSendCommandRaw(psDevInfo,
+ eKCCBType,
+ psKCCBCmd,
+ ui32CmdSize,
+ uiPdumpFlags);
+
+
+_exit:
+ /*
+ * If we don't manage to enqueue one of the deferred commands or the command
+ * passed as argument because the KCCB is full, insert the latter into the deferred commands list.
+ * The deferred commands will also be flushed eventually by:
+ * - one more KCCB command sent for any DM
+ * - the watchdog thread
+ * - the power off sequence
+ */
+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+ {
+ RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+
+ psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+ if(!psDeferredCommand)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Deferring a KCCB command failed: allocation failure: requesting retry "));
+ eError = PVRSRV_ERROR_RETRY;
+ }
+ else
+ {
+ psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+ psDeferredCommand->eDM = eKCCBType;
+ psDeferredCommand->uiPdumpFlags = uiPdumpFlags;
+ psDeferredCommand->psDevInfo = psDevInfo;
+
+ PVR_DPF((PVR_DBG_WARNING,"Deferring a KCCB command for DM %d" ,eKCCBType));
+ dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+
+ eError = PVRSRV_OK;
+ }
+ }
+
+ return eError;
+
+}
+
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+/*!
+******************************************************************************
+
+ @Function _RGXScheduleProcessQueuesMISR
+
+ @Description - Sends an uncounted kick to all the DMs (the FW will process all
+                the queues for all the DMs)
+******************************************************************************/
+static void _RGXScheduleProcessQueuesMISR(void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ /* We don't need to acquire the BridgeLock as this power transition won't
+ send a command to the FW */
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to acquire powerlock (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ return;
+ }
+
+ /* Check whether it's worth waking up the GPU */
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ if ((eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+ {
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* For now, guest drivers will always wake up the GPU */
+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_BOOL bGPUHasWorkWaiting;
+
+ bGPUHasWorkWaiting =
+ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+ if (!bGPUHasWorkWaiting)
+ {
+ /* all queues are empty, don't wake up the GPU */
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+#endif
+ }
+
+ PDUMPPOWCMDSTART();
+ /* wake up the GPU */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to transition Rogue to ON (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+
+ /* uncounted kick to the FW */
+ {
+ IMG_UINT32 ui32MTSRegVal;
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+ {
+ ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_GPUVIRT_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+ } else
+#endif
+ {
+ ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+ }
+
+ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+ __MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ return OSInstallMISR(phMISR,
+ _RGXScheduleProcessQueuesMISR,
+ psDeviceNode);
+}
+
+/*!
+******************************************************************************
+
+ @Function RGXScheduleCommand
+
+ @Description - Submits a CCB command and kicks the firmware, but first schedules
+                any commands which have to happen beforehand
+
+ @Input psDevInfo - pointer to device info
+ @Input eKCCBType - the data master the command is targeted at
+ @Input psKCCBCmd - kernel CCB command
+ @Input ui32CmdSize - size of the kernel CCB command
+ @Input ui32CacheOpFence - CPU dcache operation fence
+ @Input ui32PDumpFlags - PDUMP_FLAGS_CONTINUOUS bit set if the pdump flags should be continuous
+
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32CacheOpFence,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiMMUSyncUpdate;
+
+ eError = CacheOpFence(eKCCBType, ui32CacheOpFence);
+ if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+#if defined (SUPPORT_VALIDATION)
+ /* For validation, force the core to different dust count states with each kick */
+ if ((eKCCBType == RGXFWIF_DM_TA) || (eKCCBType == RGXFWIF_DM_CDM))
+ {
+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+ {
+ IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount);
+ PVRSRVDeviceDustCountChange(psDevInfo->psDeviceNode, ui32NumDusts);
+ }
+ }
+#endif
+
+ eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE);
+ if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+ eError = RGXSendCommandWithPowLock(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+ if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+RGXScheduleCommand_exit:
+ return eError;
+}
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_FWCCB_CMD *psFwCCBCmd;
+
+ RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+ IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
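+ /* Process every firmware CCB command between the current read and write offsets */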
+ while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+ {
+ /* Point to the next command */
+ psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+ HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+ switch(psFwCCBCmd->eCmdType)
+ {
+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+ }
+ RGXProcessRequestZSBufferBacking(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+ }
+ RGXProcessRequestZSBufferUnbacking(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list");
+ }
+ RGXProcessRequestGrow(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+ }
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d) for %d freelists",
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+#else
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d/%d) for %d freelists",
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+ psDevInfo->psRGXFWIfTraceBuf->ui32HwrCounter+1,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+#endif
+
+ RGXProcessRequestFreelistsReconstruction(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW:
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELIST_GROW, "Request to grow the RPM free list");
+ }
+ RGXProcessRequestRPMGrow(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+ }
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+ {
+ DLLIST_NODE *psNode, *psNext;
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+ &psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+ IMG_UINT32 ui32ServerCommonContextID =
+ psCmdContextResetNotification->ui32ServerCommonContextID;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+
+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMMON_CONTEXT *psThisContext =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+ if (psThisContext->ui32ContextID == ui32ServerCommonContextID)
+ {
+ psServerCommonContext = psThisContext;
+ break;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+ psServerCommonContext,
+ psCmdContextResetNotification->ui32ServerCommonContextID,
+ (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+ psCmdContextResetNotification->ui32ResetJobRef));
+
+ if (psServerCommonContext != NULL)
+ {
+ psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason;
+ psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+ }
+
+ if (psCmdContextResetNotification->bPageFault)
+ {
+ DevmemIntPFNotify(psDevInfo->psDeviceNode,
+ psCmdContextResetNotification->ui64PCAddress);
+ }
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+ {
+ RGXDumpDebugInfo(NULL,NULL,psDevInfo);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+ IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+ switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+ {
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+ {
+ PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+ {
+ PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+ {
+ PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+ {
+ PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES:
+ {
+ PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+ {
+ PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+ break;
+ }
+ }
+#endif
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+ {
+#if defined(SUPPORT_PDVFS)
+ PDVFSProcessCoreClkRateChange(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+#endif
+ break;
+ }
+ default:
+ {
+ PVR_ASSERT(IMG_FALSE);
+ }
+ }
+
+ /* Update read offset */
+ psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+ }
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc,
+ IMG_PBYTE pbyGPUFRegisterList,
+ IMG_UINT32 ui32FrameworkRegisterSize)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_RF_REGISTERS *psRFReg;
+
+ eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+ (void **)&psRFReg);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCopyCommand: Failed to map firmware render context state (%u)",
+ eError));
+ return eError;
+ }
+
+ OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+ /* Release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+ /*
+ * Dump the FW framework buffer
+ */
+ PDUMPCOMMENT("Dump FWFramework buffer");
+ DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC **ppsFWFrameworkMemDesc,
+ IMG_UINT32 ui32FrameworkCommandSize)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /*
+ Allocate device memory for the firmware GPU framework state.
+ Sufficient info to kick one or more DMs should be contained in this buffer
+ */
+ PDUMPCOMMENT("Allocate Rogue firmware framework state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32FrameworkCommandSize,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwGPUFrameworkState",
+ ppsFWFrameworkMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkContextKM: Failed to allocate firmware framework state (%u)",
+ eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_KCCB_CMD sCmdSyncPrim;
+
+ /* Ensure Rogue is powered up before kicking MTS */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to transition Rogue to ON (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ /* Setup sync primitive */
+ eError = SyncPrimSet(psSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set SyncPrim (%u)",
+ __FUNCTION__, eError));
+ goto _SyncPrimSet_Exit;
+ }
+
+ /* prepare a sync command */
+ eError = SyncPrimGetFirmwareAddr(psSyncPrim,
+ &sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to get SyncPrim FW address(%u)",
+ __FUNCTION__, eError));
+ goto _SyncPrimGetFirmwareAddr_Exit;
+ }
+ sCmdSyncPrim.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+ sCmdSyncPrim.uCmdData.sSyncData.uiUpdateVal = 1;
+
+ PDUMPCOMMENT("RGXWaitForFWOp: Submit Kernel SyncPrim [0x%08x] to DM %d ",
+ sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+ /* submit the sync primitive to the kernel CCB */
+ eError = RGXSendCommand(psDevInfo,
+ eDM,
+ &sCmdSyncPrim,
+ sizeof(RGXFWIF_KCCB_CMD),
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to schedule Kernel SyncPrim with error (%u)",
+ __FUNCTION__,
+ eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+
+ /* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXScheduleCommandAndWait: Poll for Kernel SyncPrim [0x%08x] on DM %d ",
+ sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+ SyncPrimPDumpPol(psSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+#endif
+
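+ /* Bound the host-side wait in proportion to the number of commands currently queued in the kernel CCB */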
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ IMG_UINT32 ui32CurrentQueueLength =
+ (psKCCBCtl->ui32WrapMask+1 +
+ psKCCBCtl->ui32WriteOffset -
+ psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+ IMG_UINT32 ui32MaxRetries;
+
+ for (ui32MaxRetries = (ui32CurrentQueueLength + 1) * 3;
+ ui32MaxRetries > 0;
+ ui32MaxRetries--)
+ {
+ eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, 1, 0xffffffff);
+
+ if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ break;
+ }
+ }
+
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information.",
+ __FUNCTION__));
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ PVR_ASSERT(eError != PVRSRV_ERROR_TIMEOUT);
+ goto _PVRSRVDebugRequest_Exit;
+ }
+ }
+
+_RGXSendCommandRaw_Exit:
+_SyncPrimGetFirmwareAddr_Exit:
+_SyncPrimSet_Exit:
+_PVRSRVSetDevicePowerStateKM_Exit:
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVDebugRequest_Exit:
+_PVRSRVPowerLock_Exit:
+ return eError;
+}
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_UINT32 *pui32ConfigState,
+ IMG_BOOL bSetNotClear)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sStateFlagCmd;
+ PVRSRV_CLIENT_SYNC_PRIM *psResponseSync;
+
+ if (!psDevInfo)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto return_;
+ }
+
+ if (psDevInfo->psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+ {
+ eError = PVRSRV_ERROR_NOT_INITIALISED;
+ goto return_;
+ }
+
+ sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+ sStateFlagCmd.eDM = RGXFWIF_DM_GP;
+ sStateFlagCmd.uCmdData.sStateFlagCtrl.ui32Config = ui32Config;
+ sStateFlagCmd.uCmdData.sStateFlagCtrl.bSetNotClear = bSetNotClear;
+
+ eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psResponseSync, "rgx config flags");
+ if (PVRSRV_OK != eError)
+ {
+ goto return_;
+ }
+ eError = SyncPrimSet(psResponseSync, 0);
+ if (eError != PVRSRV_OK)
+ {
+ goto return_freesync_;
+ }
+
+ eError = SyncPrimGetFirmwareAddr(psResponseSync, &sStateFlagCmd.uCmdData.sStateFlagCtrl.sSyncObjDevVAddr.ui32Addr);
+ if (PVRSRV_OK != eError)
+ {
+ goto return_freesync_;
+ }
+
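+ /* Keep retrying the submission while it returns PVRSRV_ERROR_RETRY, until the overall timeout expires */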
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sStateFlagCmd,
+ sizeof(sStateFlagCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo,
+ RGXFWIF_DM_GP,
+ psDevInfo->psDeviceNode->psSyncPrim,
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+ if (pui32ConfigState)
+ {
+ *pui32ConfigState = *psResponseSync->pui32LinAddr;
+ }
+
+return_freesync_:
+ SyncPrimFree(psResponseSync);
+return_:
+ return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ RGXFWIF_CLEANUP_TYPE eCleanupType,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+
+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+ eError = SyncPrimGetFirmwareAddr(psSyncPrim, &psKCCBCmd->uCmdData.sCleanupData.sSyncObjDevVAddr.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ eError = SyncPrimSet(psSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ /*
+ Send the cleanup request to the firmware. If the resource is still busy
+ the firmware will tell us and we'll drop out with a retry.
+ */
+ eError = RGXScheduleCommand(psDevInfo,
+ eDM,
+ psKCCBCmd,
+ ui32CmdSize,
+ 0,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ /* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command");
+ SyncPrimPDumpPol(psSyncPrim,
+ RGXFWIF_CLEANUP_RUN,
+ RGXFWIF_CLEANUP_RUN,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+
+ /*
+ * The cleanup request to the firmware will tell us if a given resource is busy or not.
+ * If the RGXFWIF_CLEANUP_BUSY flag is set, this means that the resource is still in use.
+ * In this case we return a PVRSRV_ERROR_RETRY error to the client drivers and they will
+ * re-issue the cleanup request until it succeeds.
+ *
+ * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+ * that cleanup requests are only submitted if the resource is unused.
+ * If this is not the case, the following poll will block infinitely, making sure
+ * the issue doesn't go unnoticed.
+ */
+ PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+ eDM,
+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+ psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+ SyncPrimPDumpPol(psSyncPrim,
+ 0,
+ RGXFWIF_CLEANUP_BUSY,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+#endif
+
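+ /* Wait for the firmware to set the RGXFWIF_CLEANUP_RUN flag on the sync primitive, retrying in proportion to the number of queued kernel CCB commands */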
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ IMG_UINT32 ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+ psKCCBCtl->ui32WriteOffset -
+ psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+ IMG_UINT32 ui32MaxRetries;
+
+ for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+ ui32MaxRetries > 0;
+ ui32MaxRetries--)
+ {
+ eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, RGXFWIF_CLEANUP_RUN, RGXFWIF_CLEANUP_RUN);
+
+ if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ break;
+ }
+ }
+
+ /*
+ If the firmware hasn't got back to us in a timely manner
+ then bail and let the caller retry the command.
+ */
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"RGXScheduleCleanupCommand: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information."));
+
+ eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+ PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+ DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+ goto fail_poll;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ goto fail_poll;
+ }
+ }
+
+ /*
+ If the command was run but a resource was busy, then the request
+ will need to be retried.
+ */
+ if (*psSyncPrim->pui32LinAddr & RGXFWIF_CLEANUP_BUSY)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ goto fail_requestbusy;
+ }
+
+ return PVRSRV_OK;
+
+fail_requestbusy:
+fail_poll:
+fail_command:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ RGXRequestCommonContextCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+ PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr;
+
+ psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+
+ PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]",
+ eDM, psFWCommonContextFWAddr.ui32Addr);
+ PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup");
+
+ RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sRCCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+ psSyncPrim,
+ ui32PDumpFlags);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRequestCommonContextCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ * RGXRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_HWRTDATA psHWRTData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM)
+{
+ RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("HW RTData cleanup Request DM%d [HWRTData = 0x%08x]", eDM, psHWRTData.ui32Addr);
+
+ sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sHWRTDataCleanUpCmd,
+ sizeof(sHWRTDataCleanUpCmd),
+ RGXFWIF_CLEANUP_HWRTDATA,
+ psSync,
+ IMG_FALSE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRequestHWRTDataCleanUp: Failed to schedule a HWRTData cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FREELIST psFWFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sFLCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_FREELIST,
+ psSync,
+ IMG_FALSE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestZSBufferCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_ZSBUFFER psFWZSBuffer,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_3D,
+ &sZSBufferCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_ZSBUFFER,
+ psSync,
+ IMG_FALSE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestZSBufferCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM)
+{
+ RGXFWIF_KCCB_CMD sHWFrameDataCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("HW FrameData cleanup Request DM%d [HWFrameData = 0x%08x]", eDM, psHWFrameData.ui32Addr);
+
+ sHWFrameDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWFrameData = psHWFrameData;
+
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sHWFrameDataCleanUpCmd,
+ sizeof(sHWFrameDataCleanUpCmd),
+ RGXFWIF_CLEANUP_HWFRAMEDATA,
+ psSync,
+ IMG_FALSE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRayFrameDataCleanUp: Failed to schedule a HWFrameData cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestRPMFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("RPM Free list cleanup Request [RPM FreeList = 0x%08x]", psFWRPMFreeList.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psRPMFreelist = psFWRPMFreeList;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sFLCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_RPM_FREELIST,
+ psSync,
+ IMG_FALSE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRPMFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32HCSDeadlineMs)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sSetHCSDeadline;
+
+ sSetHCSDeadline.eCmdType = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE;
+ sSetHCSDeadline.eDM = RGXFWIF_DM_GP;
+ sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sSetHCSDeadline,
+ sizeof(sSetHCSDeadline),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSConfigCmdData;
+
+ sOSConfigCmdData.eCmdType = RGXFWIF_KCCB_CMD_OS_CFG_INIT;
+ sOSConfigCmdData.eDM = RGXFWIF_DM_GP;
+ sOSConfigCmdData.uCmdData.sCmdOSConfigData.sOSInit = psDevInfo->sFWInitFWAddr;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSConfigCmdData,
+ sizeof(sOSConfigCmdData),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32IsolationPriorityThreshold)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSidIsoConfCmd;
+
+ sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE;
+ sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSidIsoConfCmd,
+ sizeof(sOSidIsoConfCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ RGXFWIF_KCCB_CMD sOSOnlineStateCmd;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ volatile IMG_UINT32 *pui32OSStateFlags;
+
+ sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
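+ /* Bringing an OS online only requires queuing the command; taking it offline additionally waits for the firmware to stop scheduling it */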
+ if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+ {
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSOnlineStateCmd,
+ sizeof(sOSOnlineStateCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+ }
+
+ if (psRGXFWIfTraceBuf == NULL)
+ {
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+ pui32OSStateFlags = (volatile IMG_UINT32*) &psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
+
+ /* Attempt several times until the FW manages to offload the OS */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ IMG_UINT32 ui32OSStateFlags;
+
+ /* Send request */
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSOnlineStateCmd,
+ sizeof(sOSOnlineStateCmd),
+ 0,
+ IMG_TRUE);
+ if (unlikely(eError == PVRSRV_ERROR_RETRY))
+ {
+ continue;
+ }
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+ /* Wait for FW to process the cmd */
+ eError = RGXWaitForFWOp(psDevInfo,
+ RGXFWIF_DM_GP,
+ psDevInfo->psDeviceNode->psSyncPrim,
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+ /* read the OS state */
+ OSMemoryBarrier();
+ ui32OSStateFlags = *pui32OSStateFlags;
+
+ if ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) == 0)
+ {
+ /* FW finished offloading the OSID */
+ eError = PVRSRV_OK;
+ break;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_TIMEOUT;
+ }
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ } END_LOOP_UNTIL_TIMEOUT();
+
+return_:
+#endif
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSidPriorityCmd;
+
+ sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+ sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid;
+ sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSidPriorityCmd,
+ sizeof(sOSidPriorityCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+ CONNECTION_DATA *psConnection,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Priority,
+ RGXFWIF_DM eDM)
+{
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT8 *pui8CmdPtr;
+ RGXFWIF_KCCB_CMD sPriorityCmd;
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader;
+ RGXFWIF_CMD_PRIORITY *psCmd;
+ PVRSRV_ERROR eError;
+
+ /*
+ Get space for command
+ */
+ ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+ eError = RGXAcquireCCB(FWCommonContextGetClientCCB(psContext),
+ ui32CmdSize,
+ (void **) &pui8CmdPtr,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ if(eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __FUNCTION__));
+ }
+ goto fail_ccbacquire;
+ }
+
+ /*
+ Write the command header and command
+ */
+ psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+ psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+ pui8CmdPtr += sizeof(*psCmdHeader);
+
+ psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+ psCmd->ui32Priority = ui32Priority;
+ pui8CmdPtr += sizeof(*psCmd);
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that, if there isn't space in the kernel CCB, we can return with
+ a retry to the services client before we perform any operations
+ */
+
+ /*
+ Submit the command
+ */
+ RGXReleaseCCB(FWCommonContextGetClientCCB(psContext),
+ ui32CmdSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Construct the priority command. */
+ sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+ sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+ sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ sPriorityCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+ sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
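+ /* Kick the firmware so it processes the priority command just written to the client CCB */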
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ eDM,
+ &sPriorityCmd,
+ sizeof(sPriorityCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"ContextSetPriority: Failed to submit set priority command with error (%u)", eError));
+ }
+
+ return PVRSRV_OK;
+
+fail_ccbacquire:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ RGXReadMETAAddr
+*/
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+ IMG_UINT8 *pui8RegBase = (IMG_UINT8*)psDevInfo->pvRegsBaseKM;
+ IMG_UINT32 ui32Value;
+
+ /* Wait for Slave Port to be Ready */
+ if (PVRSRVPollForValueKM(
+ (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Issue the Read */
+ OSWriteHWReg32(
+ psDevInfo->pvRegsBaseKM,
+ RGX_CR_META_SP_MSLVCTRL0,
+ ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+ /* Wait for Slave Port to be Ready: read complete */
+ if (PVRSRVPollForValueKM(
+ (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Read the value */
+ ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+ *pui32Value = ui32Value;
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ RGXUpdateHealthStatus
+*/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+ IMG_BOOL bCheckAfterTimePassed)
+{
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+ PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+ PVRSRV_RGXDEV_INFO* psDevInfo;
+ RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl;
+ RGXFWIF_CCB_CTL *psKCCBCtl;
+ IMG_UINT32 ui32ThreadCount;
+ IMG_BOOL bKCCBCmdsWaiting;
+
+ PVR_ASSERT(psDevNode != NULL);
+ psDevInfo = psDevNode->pvDevice;
+ psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* If the firmware is not initialised, there is not much point continuing! */
+ if (!psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+ psDevInfo->psDeviceNode == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* If Rogue is not powered on, don't continue
+ (there is a race condition where PVRSRVIsDevicePowered returns TRUE when the GPU is actually powering down.
+ That's not a problem as this function does not touch the HW except for the RGXScheduleCommand function,
+ which is already powerlock safe. The worst thing that could happen is that Rogue might power back up
+ but the chances of that are very low.) */
+ if (!PVRSRVIsDevicePowered(psDevNode))
+ {
+ return PVRSRV_OK;
+ }
+
+ /* If this is a quick update, then include the last current value... */
+ if (!bCheckAfterTimePassed)
+ {
+ eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+ eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+ }
+
+ /*
+ Firmware thread checks...
+ */
+ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++)
+ {
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+ /*
+ Check if the FW has hit an assert...
+ */
+ if (*pszTraceAssertInfo != '\0')
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Firmware thread %d has asserted: %s (%s:%d)",
+ ui32ThreadCount, pszTraceAssertInfo,
+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+ goto _RGXUpdateHealthStatus_Exit;
+ }
+
+ /*
+ Check the threads to see if they are in the same poll locations as last time...
+ */
+ if (bCheckAfterTimePassed)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] != 0 &&
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] == psDevInfo->aui32CrLastPollAddr[ui32ThreadCount])
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+ ui32ThreadCount,
+ ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
+ psRGXFWIfTraceBufCtl->aui32CrPollMask[ui32ThreadCount]));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+ goto _RGXUpdateHealthStatus_Exit;
+ }
+ psDevInfo->aui32CrLastPollAddr[ui32ThreadCount] = psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount];
+ }
+ }
+ }
+
+ /*
+ Event Object Timeouts check...
+ */
+ if (!bCheckAfterTimePassed)
+ {
+ if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Global Event Object Timeouts have risen (from %d to %d)",
+ psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+ }
+ psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+ }
+
+ /*
+ Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+ that some have executed since then.
+ */
+ bKCCBCmdsWaiting = IMG_FALSE;
+ psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+ if (psKCCBCtl != NULL)
+ {
+ if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask ||
+ psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+ psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+ }
+
+ if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+ {
+ bKCCBCmdsWaiting = IMG_TRUE;
+ }
+ }
+
+ if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted;
+
+ if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+ {
+ /*
+ If something was waiting last time then the Firmware has stopped processing commands.
+ */
+ if (psDevInfo->bKCCBCmdsWaitingLastTime)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: No KCCB commands executed since check!"));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+ }
+
+ /*
+ If no commands are currently pending and nothing happened since the last poll, then
+ schedule a dummy command to ping the firmware so we know it is alive and processing.
+ */
+ if (!bKCCBCmdsWaiting)
+ {
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+ eError = RGXScheduleCommand(psDevNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sCmpKCCBCmd,
+ sizeof(sCmpKCCBCmd),
+ 0,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Cannot schedule Health Check command! (0x%x)", eError));
+ }
+ else
+ {
+ bKCCBCmdsWaiting = IMG_TRUE;
+ }
+ }
+ }
+
+ psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting;
+ psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+ }
+
+ if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
+ {
+ /* Attempt to detect and deal with any stalled client contexts.
+ * Currently, ui32StalledClientMask is not a reliable method of detecting a stalled
+ * application, as the app could just be busy with a long-running task
+ * or lots of smaller workloads. Also, the definition of "stalled" is
+ * effectively subject to the frequency of the timer calling this function
+ * (which is a platform config value with no guarantee that it is correctly tuned).
+ */
+
+ IMG_UINT32 ui32StalledClientMask = 0;
+
+ ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
+
+ ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+#if !defined(UNDER_WDDM)
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+ }
+#endif
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ ui32StalledClientMask |= CheckForStalledClientRayCtxt(psDevInfo);
+ }
+
+ /* If at least one DM stalled bit is different than before */
+ if (psDevInfo->ui32StalledClientMask ^ ui32StalledClientMask)
+ {
+ /* Print all the stalled DMs */
+ PVR_LOG(("RGXGetDeviceHealthStatus: Possible stalled client contexts detected: %s%s%s%s%s%s%s%s%s",
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+ }
+ psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+ }
+
+ /*
+ Finished, save the new status...
+ */
+_RGXUpdateHealthStatus_Exit:
+ OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+ OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+
+ /*
+ * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+ * packets to host buffer.
+ */
+ if (psDevNode->pfnServiceHWPerf != NULL)
+ {
+ PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+ "Error occurred when servicing HWPerf buffer (%d)",
+ eError));
+ }
+ }
+
+#endif
+ return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
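+/*
+ * Illustrative sketch only (not part of this driver): a minimal standalone
+ * model of the kernel CCB checks performed in RGXUpdateHealthStatus above.
+ * The type and names below are hypothetical stand-ins for RGXFWIF_CCB_CTL:
+ * the ring size is a power of two, so both offsets must be <= the wrap mask,
+ * and commands are pending whenever the read and write offsets differ.
+ */
+#if 0 /* standalone example; build with a host C compiler */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+struct kccb_ctl_model {
+	uint32_t ui32ReadOffset;
+	uint32_t ui32WriteOffset;
+	uint32_t ui32WrapMask; /* ring size minus one */
+};
+
+static bool KCCBOffsetsValid(const struct kccb_ctl_model *psCtl)
+{
+	/* Mirrors the "KCCB has invalid offset" check above */
+	return psCtl->ui32ReadOffset <= psCtl->ui32WrapMask &&
+	       psCtl->ui32WriteOffset <= psCtl->ui32WrapMask;
+}
+
+static bool KCCBCmdsWaiting(const struct kccb_ctl_model *psCtl)
+{
+	/* Mirrors the bKCCBCmdsWaiting test above */
+	return psCtl->ui32ReadOffset != psCtl->ui32WriteOffset;
+}
+
+int main(void)
+{
+	struct kccb_ctl_model sCtl = { 3, 5, 15 };
+
+	assert(KCCBOffsetsValid(&sCtl)); /* both offsets lie within the ring */
+	assert(KCCBCmdsWaiting(&sCtl));  /* read != write, so commands pending */
+	return 0;
+}
+#endif
+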
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ RGX_CLIENT_CCB *psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+
+ return CheckForStalledCCB(psCurrentClientCCB, eKickTypeDM);
+}
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ RGX_CLIENT_CCB *psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext = psCurrentServerCommonContext->sFWCommonContextFWAddr;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+ DumpCCB(psCurrentServerCommonContext->psDevInfo, sFWCommonContext,
+ psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#else
+ DumpStalledCCBCommand(sFWCommonContext, psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#endif
+}
+
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+ IMG_UINT32 *pui32NumCleanupCtl,
+ RGXFWIF_DM eDM,
+ IMG_BOOL bKick,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer)
+{
+ PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+ PVR_ASSERT((eDM == RGXFWIF_DM_TA) || (eDM == RGXFWIF_DM_3D));
+
+ if(bKick)
+ {
+ if(eDM == RGXFWIF_DM_TA)
+ {
+ if(psRTDataCleanup)
+ {
+ PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+ RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+ offsetof(RGXFWIF_HWRTDATA, sTACleanupState),
+ RFW_FWADDR_NOREF_FLAG);
+
+ *(psCleanupCtlWrite++) = psCleanupCtl;
+ }
+ }
+ else
+ {
+ if(psRTDataCleanup)
+ {
+ PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+ RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+ offsetof(RGXFWIF_HWRTDATA, s3DCleanupState),
+ RFW_FWADDR_NOREF_FLAG);
+
+ *(psCleanupCtlWrite++) = psCleanupCtl;
+ }
+
+ if(psZBuffer)
+ {
+ (psCleanupCtlWrite++)->ui32Addr = psZBuffer->sZSBufferFWDevVAddr.ui32Addr +
+ offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+ }
+
+ if(psSBuffer)
+ {
+ (psCleanupCtlWrite++)->ui32Addr = psSBuffer->sZSBufferFWDevVAddr.ui32Addr +
+ offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+ }
+ }
+ }
+
+ *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+
+ PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+}
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Guest drivers do not support HW reset */
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_HWRINFOBUF *psHWRInfoBuf;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl;
+ IMG_UINT32 i;
+
+ if(psDevNode->pvDevice == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_DEVINFO;
+ }
+ psDevInfo = psDevNode->pvDevice;
+
+ psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+ psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ for(i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++)
+ {
+ /* Reset the HWR numbers */
+ psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[i] = 0;
+ }
+
+ for(i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+ {
+ psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+ }
+
+ for(i = 0 ; i < RGXFW_THREAD_NUM ; i++)
+ {
+ psHWRInfoBuf->ui32FirstCrPollAddr[i] = 0;
+ psHWRInfoBuf->ui32FirstCrPollMask[i] = 0;
+ }
+
+ psHWRInfoBuf->ui32WriteIndex = 0;
+ psHWRInfoBuf->ui32DDReqCount = 0;
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+ IMG_DEV_PHYADDR *psPhyAddr,
+ IMG_UINT32 ui32LogicalOffset,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_BOOL *bValid)
+{
+
+ PVRSRV_ERROR eError;
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRLockSysPhysAddresses failed (%u)",
+ eError));
+ return eError;
+ }
+
+ eError = PMR_DevPhysAddr(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ ui32LogicalOffset,
+ psPhyAddr,
+ bValid);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMR_DevPhysAddr failed (%u)",
+ eError));
+ return eError;
+ }
+
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRUnLockSysPhysAddresses failed (%u)",
+ eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset)
+{
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psDevInfo->bDumpedKCCBCtlAlready)
+ {
+ /* exiting capture range */
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+ /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+ "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+ psKCCBCtl,
+ ui32WriteOffset,
+ ui32WriteOffset);
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+ ui32WriteOffset,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPdumpDrainKCCB: problem pdumping POL for kCCBCtl (%d)", eError));
+ }
+ }
+
+ return eError;
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at the connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+ PVRSRV_ERROR eError;
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ eError = PVRSRV_OK;
+#else
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+#if !defined(NO_HARDWARE)
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+ IMG_UINT32 ui32BuildOptionsMismatch;
+ IMG_UINT32 ui32BuildOptionsFW;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+ __FUNCTION__, eError));
+ return eError;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if(*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ /* No need to wait if the FW has already updated the values */
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: client and FW build options");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+ ui32ClientBuildOptions,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info, psRGXFWInit is NULL", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto chk_exit;
+ }
+
+ ui32BuildOptionsFW = psRGXFWInit->sRGXCompChecks.ui32BuildOptions;
+ ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+ if (ui32BuildOptionsMismatch != 0)
+ {
+ if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+ "extra options present in client: (0x%x). Please check rgx_options.h",
+ ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+ "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+ ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and client build options match. [ OK ]"));
+ }
+#endif
+
+ eError = PVRSRV_OK;
+#if !defined(NO_HARDWARE)
+chk_exit:
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+#endif
+ return eError;
+
+}
+
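+/*
+ * Illustrative sketch only (not part of this driver): how the XOR in
+ * RGXClientConnectCompatCheck_ClientAgainstFW above splits a build-option
+ * mismatch into "extra in client" and "extra in firmware" bits. The values
+ * below are hypothetical bitmasks, not real RGX build options.
+ */
+#if 0 /* standalone example; build with a host C compiler */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint32_t ui32Client   = 0x0000001Eu;
+	uint32_t ui32Firmware = 0x0000000Fu;
+	uint32_t ui32Mismatch = ui32Client ^ ui32Firmware;
+
+	/* Options set in the client but not in the firmware: prints 0x00000010 */
+	printf("extra in client: 0x%08x\n", (unsigned)(ui32Client & ui32Mismatch));
+	/* Options set in the firmware but not in the client: prints 0x00000001 */
+	printf("extra in FW:     0x%08x\n", (unsigned)(ui32Firmware & ui32Mismatch));
+	return 0;
+}
+#endif
+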
+/******************************************************************************
+ End of file (rgxfwutils.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware utility routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXFWUTILS_H__)
+#define __RGXFWUTILS_H__
+
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "physmem_tdfwcode.h"
+#include "physmem_tdsecbuf.h"
+#endif
+
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT, and this causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEVMEM_SIZE_T uiSize,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_PCHAR pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ uiFlags |= PVRSRV_MEMALLOCFLAG_UNCACHED;
+ uiFlags &= ~PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED);
+#endif
+
+ /* Ensure all RI labels begin 'Fw' for the FW heap. */
+ PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+ eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+ uiSize,
+ GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize),
+ uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ pszText,
+ ppsMemDescPtr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareHeap,
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
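+/*
+ * Illustrative sketch only (not part of this driver): the SLC cache-line
+ * alignment requirement described above amounts to rounding sizes up to the
+ * line size so that two firmware-only allocations never share a line.
+ * ROGUE_SLC_LINE_SIZE_BYTES is a hypothetical example value; the real value
+ * comes from GET_ROGUE_CACHE_LINE_SIZE() on the device feature config.
+ */
+#if 0 /* standalone example; build with a host C compiler */
+#include <assert.h>
+#include <stdint.h>
+
+#define ROGUE_SLC_LINE_SIZE_BYTES 64u /* hypothetical example value */
+
+static uint64_t FwAllocAlignUp(uint64_t ui64Size)
+{
+	return (ui64Size + ROGUE_SLC_LINE_SIZE_BYTES - 1) &
+	       ~(uint64_t)(ROGUE_SLC_LINE_SIZE_BYTES - 1);
+}
+
+int main(void)
+{
+	assert(FwAllocAlignUp(1)  == 64);  /* small allocation takes a whole line */
+	assert(FwAllocAlignUp(64) == 64);  /* already aligned */
+	assert(FwAllocAlignUp(65) == 128); /* would spill into the next line */
+	return 0;
+}
+#endif
+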
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_PCHAR pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ uiFlags |= PVRSRV_MEMALLOCFLAG_UNCACHED;
+ uiFlags &= ~PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED);
+#endif
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT((pszText != NULL) &&
+ (pszText[0] == 'F') && (pszText[1] == 'w') &&
+ (pszText[2] == 'E') && (pszText[3] == 'x'));
+
+ eError = DevmemAllocateExportable(psDeviceNode,
+ uiSize,
+ uiAlign,
+ DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+ uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ pszText,
+ ppsMemDescPtr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"FW DevmemAllocateExportable failed (%u)", eError));
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareHeap,
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"FW DevmemMapToDevice failed (%u)", eError));
+ }
+
+ PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static void DevmemFWPoison(DEVMEM_MEMDESC *psMemDesc, IMG_BYTE ubPoisonValue)
+{
+ void *pvLinAddr;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvLinAddr);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire FW allocation mapping "
+ "to poison: %s",
+ __func__,
+ PVRSRVGETERRORSTRING(eError)));
+ return;
+ }
+
+ OSDeviceMemSet(pvLinAddr, ubPoisonValue, psMemDesc->uiAllocSize);
+
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+}
+
+static INLINE void DevmemFwFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_DPF_ENTERED1(psMemDesc);
+
+ if(psDevInfo->bEnableFWPoisonOnFree)
+ {
+ DevmemFWPoison(psMemDesc, psDevInfo->ubFWPoisonOnFreeValue);
+ }
+
+ DevmemReleaseDevVirtAddr(psMemDesc);
+ DevmemFree(psMemDesc);
+
+ PVR_DPF_RETURN;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static INLINE
+PVRSRV_ERROR DevmemImportTDFWCode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiMemAllocFlags,
+ IMG_BOOL bFWCorememCode,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PMR *psTDFWCodePMR;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_DEVMEM_SIZE_T uiMemDescSize;
+ IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsMemDescPtr);
+
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+ &uiSize,
+ &uiAlign);
+
+ eError = PhysmemNewTDFWCodePMR(psDeviceNode,
+ uiSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ bFWCorememCode,
+ &psTDFWCodePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWCodePMR failed (%u)", eError));
+ goto PMRCreateError;
+ }
+
+ /* NB: TDFWCodePMR refcount: 1 -> 2 */
+ eError = DevmemLocalImport(psDeviceNode,
+ psTDFWCodePMR,
+ uiMemAllocFlags,
+ ppsMemDescPtr,
+ &uiMemDescSize,
+ "TDFWCode");
+ if(eError != PVRSRV_OK)
+ {
+ goto ImportError;
+ }
+
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareHeap,
+ &sTmpDevVAddr);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to map TD META code PMR (%u)", eError));
+ goto MapError;
+ }
+
+ /* NB: TDFWCodePMR refcount: 2 -> 1
+ * The PMR will be unreferenced again (and destroyed) when
+ * the memdesc tracking it is cleaned up
+ */
+ PMRUnrefPMR(psTDFWCodePMR);
+
+ return PVRSRV_OK;
+
+MapError:
+ DevmemFree(*ppsMemDescPtr);
+ *ppsMemDescPtr = NULL;
+ImportError:
+ /* Unref and destroy the PMR */
+ PMRUnrefPMR(psTDFWCodePMR);
+PMRCreateError:
+
+ return eError;
+}
+
+static INLINE
+PVRSRV_ERROR DevmemImportTDSecureBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiMemAllocFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PMR *psTDSecureBufPMR;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_DEVMEM_SIZE_T uiMemDescSize;
+ IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsMemDescPtr);
+
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+ &uiSize,
+ &uiAlign);
+
+ eError = PhysmemNewTDSecureBufPMR(NULL,
+ psDeviceNode,
+ uiSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ &psTDSecureBufPMR,
+ pui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR failed (%u)", eError));
+ goto PMRCreateError;
+ }
+
+ /* NB: psTDSecureBufPMR refcount: 1 -> 2 */
+ eError = DevmemLocalImport(psDeviceNode,
+ psTDSecureBufPMR,
+ uiMemAllocFlags,
+ ppsMemDescPtr,
+ &uiMemDescSize,
+ "TDSecureBuffer");
+ if(eError != PVRSRV_OK)
+ {
+ goto ImportError;
+ }
+
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareHeap,
+ &sTmpDevVAddr);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to map TD secure buffer PMR (%u)", eError));
+ goto MapError;
+ }
+
+ /* NB: psTDSecureBufPMR refcount: 2 -> 1
+ * The PMR will be unreferenced again (and destroyed) when
+ * the memdesc tracking it is cleaned up
+ */
+ PMRUnrefPMR(psTDSecureBufPMR);
+
+ return PVRSRV_OK;
+
+MapError:
+ DevmemFree(*ppsMemDescPtr);
+ *ppsMemDescPtr = NULL;
+ImportError:
+ /* Unref and destroy the PMR */
+ PMRUnrefPMR(psTDSecureBufPMR);
+PMRCreateError:
+
+ return eError;
+}
+#endif
+
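+/*
+ * Illustrative sketch only (not part of this driver): the PMR reference
+ * counting sequence used by DevmemImportTDFWCode and DevmemImportTDSecureBuf
+ * above. Creation takes one reference, the local import takes a second, and
+ * the explicit PMRUnrefPMR drops the creator's reference so the memdesc is
+ * left holding the last one. The counter below is a purely hypothetical model.
+ */
+#if 0 /* standalone example; build with a host C compiler */
+#include <assert.h>
+
+int main(void)
+{
+	int iPMRRefCount = 0;
+
+	iPMRRefCount++;            /* PhysmemNewTD*PMR:  refcount 0 -> 1 */
+	iPMRRefCount++;            /* DevmemLocalImport: refcount 1 -> 2 */
+	iPMRRefCount--;            /* PMRUnrefPMR:       refcount 2 -> 1 */
+	assert(iPMRRefCount == 1); /* memdesc cleanup later drops it to 0 */
+	return 0;
+}
+#endif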
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+ /*
+ * In order to avoid having to issue three 32-bit reads to detect the
+ * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+ * in the MSB of the high 32-bit word. If the wrap happens, we just read
+ * the register again (it will not wrap again so soon).
+ */
+ if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+ {
+ ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+ }
+
+ return ((ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT);
+}
+
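+/*
+ * Illustrative sketch only (not part of this driver): how the duplicated MSB
+ * described above detects a wrap of the low 32 bits of RGX_CR_TIMER. Bit 63
+ * of the value mirrors bit 31; if the two differ, the low word wrapped while
+ * the register was being sampled and the value should be re-read. The
+ * constants below are hypothetical sample values, not real register reads.
+ */
+#if 0 /* standalone example; build with a host C compiler */
+#include <assert.h>
+#include <stdint.h>
+
+static int TimerLowWordWrapped(uint64_t ui64Time)
+{
+	uint64_t ui64DuplicatedMSB = (ui64Time >> 63) & 1; /* copy kept in bit 63 */
+	uint64_t ui64LiveMSB       = (ui64Time >> 31) & 1; /* live bit 31 of low word */
+
+	return (int)(ui64DuplicatedMSB ^ ui64LiveMSB);
+}
+
+int main(void)
+{
+	assert(TimerLowWordWrapped(0x8000000080000000ULL) == 0); /* consistent */
+	assert(TimerLowWordWrapped(0x0000000080000000ULL) == 1); /* wrapped: re-read */
+	return 0;
+}
+#endif
+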
+/*
+ * This FW Common Context is only mapped into the kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+ * and write-combine suffices on the CPU side (the WC buffer will be flushed at the first kick).
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer,
+ otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ RGXFWIF_BIFTILINGMODE eBifTilingMode,
+ IMG_UINT32 ui32NumTilingCfgs,
+ IMG_UINT32 *pui32BIFTilingXStrides,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGXFWIF_DEV_VIRTADDR *psRGXFWInitFWAddr,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function RGXSetFirmwareAddress
+
+@Description Sets a pointer in a firmware data structure.
+
+@Input ppDest Address of the pointer to set
+@Input psSrc MemDesc describing the pointer
+@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG
+
+@Return void
+*/ /**************************************************************************/
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest,
+ DEVMEM_MEMDESC *psSrc,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 ui32Flags);
+
+
+/*************************************************************************/ /*!
+@Function RGXSetMetaDMAAddress
+
+@Description Fills a Firmware structure used to set up the Meta DMA with two
+ pointers to the same data, one 40-bit and one 32-bit
+ (a pointer in the FW memory space).
+
+@Input psDest Address of the structure to set
+@Input psSrcMemDesc MemDesc describing the pointer
+@Input psSrcFWDevVAddr Firmware memory space pointer
+
+@Return void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest,
+ DEVMEM_MEMDESC *psSrcMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr,
+ IMG_UINT32 uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function RGXUnsetFirmwareAddress
+
+@Description Unsets a pointer in a firmware data structure
+
+@Input psSrc MemDesc describing the pointer
+
+@Return void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
+
+/*************************************************************************/ /*!
+@Function FWCommonContextAllocate
+
+@Description Allocate a FW common context. This allocates the HW memory
+ for the context, the CCB and wires it all together.
+
+@Input psConnection Connection this context is being created on
+@Input psDeviceNode Device node to create the FW context on
+ (must be RGX device node)
+@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which
+ represents the requestor of this FWCC
+@Input eDM Data Master type
+@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use
+ as the FW context or NULL if this function
+ should allocate it
+@Input ui32AllocatedOffset Offset into pre-allocated MemDesc to use
+ as the FW context. If psAllocatedMemDesc
+ is NULL then this parameter is ignored
+@Input psFWMemContextMemDesc MemDesc of the FW memory context this
+ common context resides on
+@Input psContextStateMemDesc FW context state (context switch) MemDesc
+@Input ui32CCBAllocSize Size of the CCB for this context
+@Input ui32Priority Priority of the context
+@Input psInfo Structure that contains extra info
+ required for the creation of the context
+ (elements might change from core to core)
+@Return PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGXFWIF_DM eDM,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ DEVMEM_MEMDESC *psContextStateMemDesc,
+ IMG_UINT32 ui32CCBAllocSize,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+/*!
+******************************************************************************
+
+ @Function RGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler
+ (sends uncounted kicks for all the DMs through the MISR)
+
+ @Input hCmdCompHandle - RGX device node
+
+******************************************************************************/
+IMG_IMPORT
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXInstallProcessQueuesMISR
+
+ @Description - Installs the MISR to handle Process Queues operations
+
+ @Input phMISR - Pointer to the MISR handler
+
+ @Input psDeviceNode - RGX Device node
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function RGXSendCommandWithPowLock
+
+@Description Sends a command to a particular DM without honouring
+ pending cache operations but taking the power lock.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input ui32PDumpFlags Pdump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function RGXSendCommand
+
+@Description Sends a command to a particular DM without honouring
+ pending cache operations or the power lock.
+ The function flushes any deferred KCCB commands first.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input uiPdumpFlags PDump flags.
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ PDUMP_FLAGS_T uiPdumpFlags);
+
+
+/*************************************************************************/ /*!
+@Function RGXScheduleCommand
+
+@Description Sends a command to a particular DM
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input ui32CacheOpFence Pending cache op. fence value.
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32CacheOpFence,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function RGXScheduleCommandAndWait
+
+@Description Schedules the command with RGXScheduleCommand and then waits
+ for the FW to update a sync. The sync must be piggy backed on
+ the cmd, either by passing a sync cmd or a cmd that contains the
+ sync which the FW will eventually update. The sync is created in
+ the function, therefore the function provides a FWAddr and
+ UpdateValue for that cmd.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input puiSyncObjFWAddr Pointer to the location with the FWAddr of
+ the sync.
+@Input puiUpdateValue Pointer to the location with the update
+ value of the sync.
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndWait(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 *puiSyncObjDevVAddr,
+ IMG_UINT32 *puiUpdateValue,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+/*! ***********************************************************************//**
+@brief Copy framework command into FW addressable buffer
+
+@param psFWFrameworkMemDesc
+@param pbyGPUFRegisterList
+@param ui32FrameworkRegisterSize
+
+@returns PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc,
+ IMG_PBYTE pbyGPUFRegisterList,
+ IMG_UINT32 ui32FrameworkRegisterSize);
+
+
+/*! ***********************************************************************//**
+@brief Create FW addressable buffer for framework
+
+@param psDeviceNode
+@param ppsFWFrameworkMemDesc
+@param ui32FrameworkRegisterSize
+
+@returns PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc,
+ IMG_UINT32 ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function RGXWaitForFWOp
+
+@Description Send a sync command and wait to be signalled.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function RGXStateFlagCtrl
+
+@Description Set and return FW internal state flags.
+
+@Input psDevInfo Device Info
+@Input ui32Config AppHint config flags
+@Output pui32State Current AppHint state flag configuration
+@Input bSetNotClear Set or clear the provided config flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_UINT32 *pui32State,
+ IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestCommonContextCleanUp
+
+ @Description Schedules a FW common context cleanup. The firmware doesn't
+ block waiting for the resource to become idle but rather notifies
+ the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWContext - firmware address of the context to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+ @Input ui32PDumpFlags - PDump continuous flag
+
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestHWRTDataCleanUp
+
+ @Description Schedules a FW HWRTData memory cleanup. The firmware doesn't
+ block waiting for the resource to become idle but rather notifies
+ the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_HWRTDATA psHWRTData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM);
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestRPMFreeListCleanUp
+
+ @Description Schedules a FW RPM FreeList cleanup. The firmware doesn't block
+ waiting for the resource to become idle but rather notifies the
+ host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWRPMFreeList - firmware address of the RPM freelist to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestFreeListCleanUp
+
+ @Description Schedules a FW FreeList cleanup. The firmware doesn't block
+ waiting for the resource to become idle but rather notifies the
+ host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWFreeList - firmware address of the FreeList to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode,
+ PRGXFWIF_FREELIST psFWFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestZSBufferCleanUp
+
+ @Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block
+ waiting for the resource to become idle but rather notifies the
+ host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWZSBuffer - firmware address of the ZS Buffer to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_ZSBUFFER psFWZSBuffer,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+ CONNECTION_DATA *psConnection,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Priority,
+ RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWSetHCSDeadline
+
+ @Description Requests the Firmware to set a new Hard Context
+ Switch timeout deadline. Context switches that
+ surpass that deadline cause the system to kill
+ the currently running workloads.
+
+ @Input psDeviceNode pointer to device node
+
+ @Input ui32HCSDeadlineMs The deadline in milliseconds.
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32HCSDeadlineMs);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWChangeOSidPriority
+
+ @Description Requests the Firmware to change the priority of an
+ operating system. Higher priority number equals
+ higher priority on the scheduling system.
+
+ @Input psDeviceNode pointer to device node
+
+ @Input ui32OSid The OSid whose priority is to be altered
+
+ @Input ui32Priority The new priority number for the specified OSid
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32Priority);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWSetOSIsolationThreshold
+
+ @Description Requests the Firmware to change the priority
+ threshold of the OS Isolation group. Any OS with a
+ priority higher than or equal to the threshold is
+ considered to belong to the isolation group.
+
+ @Input psDeviceNode pointer to device node
+
+ @Input ui32IsolationPriorityThreshold The new priority threshold
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32IsolationPriorityThreshold);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWOSConfig
+
+ @Description Sends the OS Init structure to the FW to complete
+ the initialization process. The FW will then set all
+ the OS specific parameters for that DDK
+
+ @Input psDeviceNode pointer to device node
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWSetVMOnlineState
+
+ @Description Requests the Firmware to change the guest OS Online
+ states. This should be initiated by the VMM when a
+ guest VM comes online or goes offline. If offline,
+ the FW offloads any current resource from that OSID.
+ The request is repeated until the FW has had time to
+ free all the resources or has waited for workloads
+ to finish.
+
+ @Input psDeviceNode pointer to device node
+
+ @Input ui32OSid The Guest OSid whose state is being altered
+
+ @Input eOSOnlineState The new state (Online or Offline)
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
+/*!
+******************************************************************************
+
+ @Function RGXReadMETAAddr
+
+ @Description Reads a value at given address in META memory space
+ (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Output pui32Value - value
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32METAAddr,
+ IMG_UINT32 *pui32Value);
+
+/*!
+******************************************************************************
+
+ @Function RGXCheckFirmwareCCB
+
+ @Description Processes all commands that are found in the Firmware CCB.
+
+ @Input psDevInfo - pointer to device
+
+ ******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function RGXUpdateHealthStatus
+
+ @Description Tests a number of conditions which might indicate that a fatal error has
+ occurred in the firmware. The result is stored in the device node's
+ eHealthStatus field.
+
+ @Input psDevNode Pointer to device node structure.
+ @Input bCheckAfterTimePassed When TRUE, the function will also test for
+ firmware queues and polls not changing
+ since the previous test.
+
+ Note: if not enough time has passed since
+ the last call, false positives may occur.
+
+ @returns PVRSRV_ERROR
+ ******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+ IMG_BOOL bCheckAfterTimePassed);
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/*!
+******************************************************************************
+
+ @Function AttachKickResourcesCleanupCtls
+
+ @Description Attaches the cleanup structures to a kick command so that
+ submission reference counting can be performed when the
+ firmware processes the command
+
+ @Output apsCleanupCtl Array of CleanupCtl structure pointers to populate.
+ @Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out.
+ @Input eDM Which data master is the subject of the command.
+ @Input bKick TRUE if the client originally wanted to kick this DM.
+ @Input psRTDataCleanup Optional RTData cleanup associated with the command.
+ @Input psZBuffer Optional ZBuffer associated with the command.
+ @Input psSBuffer Optional SBuffer associated with the command.
+ ******************************************************************************/
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+ IMG_UINT32 *pui32NumCleanupCtl,
+ RGXFWIF_DM eDM,
+ IMG_BOOL bKick,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer);
+
+/*!
+******************************************************************************
+
+ @Function RGXResetHWRLogs
+
+ @Description Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input psDevNode Pointer to the device node
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXGetPhyAddr
+
+ @Description Get the physical address of a certain PMR at a certain offset within it
+
+ @Input psPMR PMR of the allocation
+
+ @Input ui32LogicalOffset Logical offset
+
+ @Output psPhyAddr Physical address of the allocation
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+ IMG_DEV_PHYADDR *psPhyAddr,
+ IMG_UINT32 ui32LogicalOffset,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function RGXPdumpDrainKCCB
+
+ @Description Wait for the firmware to execute all the commands in the kCCB
+
+ @Input psDevInfo Pointer to the device
+
+ @Input ui32WriteOffset Write offset (Woff) that the PDump POL waits for the read offset (Roff) to reach
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset);
+#endif /* PDUMP */
+
+
+#endif /* __RGXFWUTILS_H__ */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title device configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory heaps device specific configuration
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#warning FIXME: add the MMU specialisation defines here (or in hwdefs, perhaps?)
+
+#ifndef __RGXHEAPCONFIG_H__
+#define __RGXHEAPCONFIG_H__
+
+#include "rgxdefs_km.h"
+
+/*
+ RGX Device Virtual Address Space Definitions
+ NOTES:
+ Base addresses have to be a multiple of 4MiB
+
+ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+ respectively. Therefore if clients use multiple configs they must still
+ be consistent with their definitions for these heaps.
+
+ Shared virtual memory (GENERAL_SVM) support requires half of the address
+ space be reserved for SVM allocations unless BRN fixes are required in
+ which case the SVM heap is disabled. This is reflected in the device
+ connection capability bits returned to userspace.
+
+ Variable page-size heap (GENERAL_NON4K) support splits available fixed
+ 4K page-size heap (GENERAL) address space in half. The actual page size
+ defaults to 16K; AppHint PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE
+ can be used to force it to one of these values: 4K, 64K, 256K, 1M, 2M.
+*/
+
+ /* Start at 4 MiB. Size of 512 GiB less 4 MiB (managed by OS/Services) */
+ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
+ #define RGX_GENERAL_SVM_HEAP_SIZE IMG_UINT64_C(0x7FFFC00000)
+
+ /* Start at 512GiB. Size of 256 GiB */
+ #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
+ #define RGX_GENERAL_HEAP_SIZE IMG_UINT64_C(0x4000000000)
+
+ /* Start at 768GiB. Size of 64 GiB */
+ #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xC000000000)
+ #define RGX_GENERAL_NON4K_HEAP_SIZE IMG_UINT64_C(0x1000000000)
+
+ /* Start at 832 GiB. Size of 32 GiB */
+ #define RGX_BIF_TILING_NUM_HEAPS 4
+ #define RGX_BIF_TILING_HEAP_SIZE IMG_UINT64_C(0x0200000000)
+ #define RGX_BIF_TILING_HEAP_1_BASE IMG_UINT64_C(0xD000000000)
+ #define RGX_BIF_TILING_HEAP_2_BASE (RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE)
+ #define RGX_BIF_TILING_HEAP_3_BASE (RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE)
+ #define RGX_BIF_TILING_HEAP_4_BASE (RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE)
+
+ /* HWBRN52402 workaround requires PDS memory to be below 16GB. Start at 8GB. Size of 4GB. */
+ #define RGX_PDSCODEDATA_BRN_52402_HEAP_BASE IMG_UINT64_C(0x0200000000)
+ #define RGX_PDSCODEDATA_BRN_52402_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 872 GiB. Size of 4 GiB */
+ #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
+ #define RGX_PDSCODEDATA_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* HWBRN63142 workaround requires Region Header memory to be at the top
+ of a 16GB aligned range. This is so when masked with 0x03FFFFFFFF the
+ address will avoid aliasing PB addresses. Start at 879.75GB. Size of 256MB. */
+ #define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000)
+ #define RGX_RGNHDR_BRN_63142_HEAP_SIZE IMG_UINT64_C(0x0010000000)
+
+ /* Start at 880 GiB, Size of 1 MiB */
+ #define RGX_VISTEST_HEAP_BASE IMG_UINT64_C(0xDC00000000)
+ #define RGX_VISTEST_HEAP_SIZE IMG_UINT64_C(0x0000100000)
+
+ /* HWBRN52402 workaround requires PDS memory to be below 16GB. Start at 12GB. Size of 4GB. */
+ #define RGX_USCCODE_BRN_52402_HEAP_BASE IMG_UINT64_C(0x0300000000)
+ #define RGX_USCCODE_BRN_52402_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 896 GiB Size of 4 GiB */
+ #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
+ #define RGX_USCCODE_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+
+ /* Start at 903GiB. Size of 32MB per OSID (defined in rgxdefs_km.h)
+ #define RGX_FIRMWARE_HEAP_BASE IMG_UINT64_C(0xE1C0000000)
+ #define RGX_FIRMWARE_HEAP_SIZE (1<<RGX_FW_HEAP_SHIFT)
+ #define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT */
+
+ /* HWBRN52402 & HWBRN55091 workarounds requires TQ memory to be below 16GB and 16GB aligned. Start at 0GB. Size of 8GB. */
+ #define RGX_TQ3DPARAMETERS_BRN_52402_55091_HEAP_BASE IMG_UINT64_C(0x0000000000)
+ #define RGX_TQ3DPARAMETERS_BRN_52402_55091_HEAP_SIZE IMG_UINT64_C(0x0200000000)
+
+ /* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */
+ #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000)
+ #define RGX_TQ3DPARAMETERS_HEAP_SIZE IMG_UINT64_C(0x0400000000)
+
+ /* Size of 16 * 4 KB pages, reserved as 1 MiB to allow for large-page systems */
+ #define RGX_HWBRN37200_HEAP_BASE IMG_UINT64_C(0xFFFFF00000)
+ #define RGX_HWBRN37200_HEAP_SIZE IMG_UINT64_C(0x0000100000)
+
+ /* Start at 928GiB. Size of 4 GiB */
+ #define RGX_DOPPLER_HEAP_BASE IMG_UINT64_C(0xE800000000)
+ #define RGX_DOPPLER_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 932GiB. Size of 4 GiB */
+ #define RGX_DOPPLER_OVERFLOW_HEAP_BASE IMG_UINT64_C(0xE900000000)
+ #define RGX_DOPPLER_OVERFLOW_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 936 GiB. Two contiguous 128 KB heaps that must remain in this order. */
+ #define RGX_SERVICES_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000)
+ #define RGX_SERVICES_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000020000)
+
+ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00020000)
+ #define RGX_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000020000)
+
+ /* TDM TPU YUV coeffs - can be reduced to a single page */
+ #define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE IMG_UINT64_C(0xEA00080000)
+ #define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE IMG_UINT64_C(0x0000040000)
+
+
+#endif /* __RGXHEAPCONFIG_H__ */
+
+/*****************************************************************************
+ End of file (rgxheapconfig.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX HW Performance implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "pvr_debug.h"
+#include "pvr_hwperf.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgx_hwperf_km.h"
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+/* Defined to ensure HWPerf packets are not delayed */
+#define SUPPORT_TL_PROODUCER_CALLBACK 1
+
+
+/******************************************************************************
+ *
+ *****************************************************************************/
+
+
+/*
+ RGXHWPerfCopyDataL1toL2
+*/
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(IMG_HANDLE hHWPerfStream,
+ IMG_BYTE *pbFwBuffer,
+ IMG_UINT32 ui32BytesExp)
+{
+ IMG_BYTE *pbL2Buffer;
+ IMG_UINT32 ui32L2BufFree;
+ IMG_UINT32 ui32BytesCopied = 0;
+ IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+ PVRSRV_ERROR eError;
+
+/* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+ PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+ pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ {
+ /* Check the incoming buffer of data has not lost any packets */
+ IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+ IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+ do
+ {
+ RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+ IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+ if (gui32Ordinal != IMG_UINT32_MAX)
+ {
+ if ((gui32Ordinal+1) != ui32CurOrdinal)
+ {
+ if (gui32Ordinal < ui32CurOrdinal)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+ pbFwBufferIter,
+ ui32CurOrdinal - gui32Ordinal - 1,
+ gui32Ordinal,
+ ui32CurOrdinal));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+ pbFwBufferIter,
+ gui32Ordinal,
+ ui32CurOrdinal));
+ }
+ }
+ }
+ gui32Ordinal = asCurPos->ui32Ordinal;
+ pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+ } while( pbFwBufferIter < pbFwBufferEnd );
+ }
+#endif
+
+ /* Try submitting all data in one TL packet. */
+ eError = TLStreamReserve2( hHWPerfStream,
+ &pbL2Buffer,
+ (size_t)ui32BytesExp, ui32BytesExpMin,
+ &ui32L2BufFree);
+ if ( eError == PVRSRV_OK )
+ {
+ OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
+ eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
+ if ( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+ eError, __func__));
+ goto e0;
+ }
+ /* Data were successfully written */
+ ui32BytesCopied = ui32BytesExp;
+ }
+ else if (eError == PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+ {
+ /* There was not enough space for all data, copy as much as possible */
+ IMG_UINT32 sizeSum = 0;
+ RGX_PHWPERF_V2_PACKET_HDR psCurPkt = RGX_HWPERF_GET_PACKET(pbFwBuffer);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+ /* Traverse the array to find how many packets will fit in the available space. */
+ while ( sizeSum < ui32BytesExp &&
+ sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32L2BufFree )
+ {
+ sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+ psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+ }
+
+ if ( 0 != sizeSum )
+ {
+ eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
+
+ if ( eError == PVRSRV_OK )
+ {
+ OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
+ eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
+ if ( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+ eError, __func__));
+ goto e0;
+ }
+ /* sizeSum bytes of hwperf packets have been successfully written */
+ ui32BytesCopied = sizeSum;
+ }
+ else if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG == eError )
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Can not write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Can not find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+ }
+ }
+ if ( PVRSRV_OK != eError && /* Some other error occurred */
+ PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG != eError ) /* Buffer-full case is handled by the caller; we return the copied byte count */
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+ eError));
+ }
+
+e0:
+ /* Return the remaining packets left to be transported. */
+ PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
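+/*
+ RGXHWPerfAdvanceRIdx
+
+ Advance the L1 (FW) circular buffer read index by ui32Size bytes, wrapping
+ back to the start of the buffer once the end is reached or passed.
+*/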
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+ const IMG_UINT32 ui32BufSize,
+ const IMG_UINT32 ui32Pos,
+ const IMG_UINT32 ui32Size)
+{
+ return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
+
+
+/*
+ RGXHWPerfDataStore
+*/
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+ IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+ IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ IMG_UINT32 ui32BytesExpSum = 0;
+#endif
+
+ PVR_DPF_ENTERED;
+
+ /* Caller should check this member is valid before calling */
+ PVR_ASSERT(psDevInfo->hHWPerfStream);
+
+ /* Get a copy of the current
+ * read (first packet to read)
+ * write (empty location for the next write to be inserted)
+ * WrapCount (offset of the end of valid data once the writer has wrapped)
+ * indexes of the FW buffer */
+ ui32SrcRIdx = psRGXFWIfTraceBufCtl->ui32HWPerfRIdx;
+ ui32SrcWIdx = psRGXFWIfTraceBufCtl->ui32HWPerfWIdx;
+ OSMemoryBarrier();
+ ui32SrcWrapCount = psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount;
+
+ /* Is there any data in the buffer not yet retrieved? */
+ if ( ui32SrcRIdx != ui32SrcWIdx )
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d ", ui32SrcRIdx, ui32SrcWIdx));
+
+ /* Is the write position higher than the read position? */
+ if ( ui32SrcWIdx > ui32SrcRIdx )
+ {
+ /* Yes, buffer has not wrapped */
+ ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+ psHwPerfInfo + ui32SrcRIdx,
+ ui32BytesExp);
+
+ /* Advance the read index and the free bytes counter by the number
+ * of bytes transported. Items will be left in buffer if not all data
+ * could be transported. Exit to allow buffer to drain. */
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+ }
+ /* No, buffer has wrapped and write position is behind read position */
+ else
+ {
+ /* Byte count equal to
+ * number of bytes from read position to the end of the buffer,
+ * + data in the extra space in the end of the buffer. */
+ ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ /* Attempt to transfer the packets to the TL stream buffer */
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+ psHwPerfInfo + ui32SrcRIdx,
+ ui32BytesExp);
+
+ /* Advance the read index as before and update the local copy of the
+ * read index, as it may be used in the final if branch */
+ ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ /* Update Wrap Count */
+ if ( ui32SrcRIdx == 0)
+ {
+ psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+ }
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = ui32SrcRIdx;
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+
+ /* If all the data in the end of the array was copied, try copying
+ * wrapped data in the beginning of the array, assuming there is
+ * any and the RIdx was wrapped. */
+ if ( (ui32BytesCopied == ui32BytesExp)
+ && (ui32SrcWIdx > 0)
+ && (ui32SrcRIdx == 0) )
+ {
+ ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+ psHwPerfInfo,
+ ui32BytesExp);
+ /* Advance the FW buffer read position. */
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+ }
+ }
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ if (ui32BytesCopiedSum != ui32BytesExpSum)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psRGXFWIfTraceBufCtl->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+ }
+#endif
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+ }
+
+ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+ IMG_UINT32 ui32BytesCopied;
+
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDevNode);
+ psRgxDevInfo = psDevNode->pvDevice;
+
+ /* Keep HWPerf resource init check and use of
+ * resources atomic, they may not be freed during use
+ */
+ OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+ if (psRgxDevInfo->hHWPerfStream != 0)
+ {
+ ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+ if ( ui32BytesCopied )
+ { /* Signal consumers that packets may be available to read when
+ * running from a HW kick, not when called by client APP thread
+ * via the transport layer CB as this can lead to stream
+ * corruption.*/
+ eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied"));
+ RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+ }
+ }
+
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+ PVR_DPF_RETURN_OK;
+}
+
+
+/* Producer callback, compiled in when SUPPORT_TL_PROODUCER_CALLBACK is defined (see top of file) */
+#if defined(SUPPORT_TL_PROODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+ PVR_UNREFERENCED_PARAMETER(hStream);
+ PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+ PVR_ASSERT(psRgxDevInfo);
+
+ switch (ui32ReqOp)
+ {
+ case TL_SOURCECB_OP_CLIENT_EOS:
+ /* Keep HWPerf resource init check and use of
+ * resources atomic, they may not be freed during use
+ */
+ OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+ if (psRgxDevInfo->hHWPerfStream != 0)
+ {
+ (void) RGXHWPerfDataStore(psRgxDevInfo);
+ }
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+ break;
+
+ default:
+ break;
+ }
+
+ return eError;
+}
+#endif
+
+
+/* References to key objects to allow kernel-side behaviour to function
+ * e.g. FTrace and KM interface to HWPerf.
+ */
+static PVRSRV_DEVICE_NODE* gpsRgxDevNode = NULL;
+static PVRSRV_RGXDEV_INFO* gpsRgxDevInfo = NULL;
+
+static void RGXHWPerfL1BufferDeinit(void)
+{
+ if (gpsRgxDevInfo && gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc)
+ {
+ if (gpsRgxDevInfo->psRGXFWIfHWPerfBuf != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ gpsRgxDevInfo->psRGXFWIfHWPerfBuf = NULL;
+ }
+ DevmemFwFree(gpsRgxDevInfo, gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfInit
+
+@Description Called during driver init for initialization of HWPerf module
+ in the Rogue device driver. Only the minimal resources
+ required for the HWPerf server module to function are
+ allocated here.
+
+@Input psRgxDevNode RGX Device Node
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_DEVICE_NODE *psRgxDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ /* expecting a valid device node */
+ PVR_ASSERT(psRgxDevNode);
+
+ /* Keep RGX device's reference for later use as this parameter is
+ * optional on later calls to HWPerf server module */
+ gpsRgxDevNode = psRgxDevNode;
+ gpsRgxDevInfo = psRgxDevNode->pvDevice;
+
+ /* Create a lock for the HWPerf server module, used to serialise L1-to-L2
+ * copy calls (e.g. from the TL producer callback) and L1/L2 resource
+ * allocation */
+ eError = OSLockCreate(&gpsRgxDevInfo->hHWPerfLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+ /* avoid uninitialised data */
+ gpsRgxDevInfo->hHWPerfStream = 0;
+ gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+
+ PVR_DPF_RETURN_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfIsInitRequired
+
+@Description Returns true if the HWPerf firmware buffer (L1 buffer) and host
+ driver TL buffer (L2 buffer) are not already allocated. The
+ caller must hold hHWPerfLock before calling this function so
+ that the state tested is consistent.
+
+@Return IMG_BOOL Whether initialization (allocation) is required
+*/ /**************************************************************************/
+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(void)
+{
+ PVR_ASSERT(OSLockIsLocked(gpsRgxDevInfo->hHWPerfLock));
+
+#if !defined (NO_HARDWARE)
+ /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver
+ * built for actual hardware (TC, EMU, etc.)
+ */
+ if (gpsRgxDevInfo->hHWPerfStream == 0)
+ {
+ /* The allocation API (RGXHWPerfInitOnDemandResources) allocates
+ * device memory for both L1 and L2 without any checks. Hence,
+ * either both should be allocated or both be NULL.
+ *
+ * In-case this changes in future (for e.g. a situation where one
+ * of the 2 buffers is already allocated and other is required),
+ * add required checks before allocation calls to avoid memory leaks.
+ */
+ PVR_ASSERT(gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL);
+ return IMG_TRUE;
+ }
+ PVR_ASSERT(gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL);
+#else
+ /* On a NO-HW driver L2 is not allocated. So, no point in checking its
+ * allocation */
+ if (gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL)
+ {
+ return IMG_TRUE;
+ }
+#endif
+ return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfInitOnDemandResources
+
+@Description This function allocates the HWPerf firmware buffer (L1 buffer)
+ and host driver TL buffer (L2 buffer). It is called at driver
+ load time if HWPerf is enabled then; otherwise it is called on
+ demand when these buffers are first required. The caller must
+ hold hHWPerfLock before calling this function so that the state
+ tested is consistent if called outside of driver
+ initialisation.
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(void)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32L2BufferSize;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+
+ PVR_DPF_ENTERED;
+
+ /* Create the L1 HWPerf buffer on demand */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)
+ | PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ | PVRSRV_MEMALLOCFLAG_CPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+ | PVRSRV_MEMALLOCFLAG_UNCACHED
+ #if defined(PDUMP)
+ | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+ #endif
+ ;
+
+ /* Allocate HWPerf FW L1 buffer */
+ eError = DevmemFwAllocate(gpsRgxDevInfo,
+ gpsRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGXFW_HWPERF_L1_PADDING_DEFAULT,
+ uiMemAllocFlags,
+ "FwHWPerfBuffer",
+ &gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate kernel fw hwperf buffer (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ /* The RuntimeCfg structure is expected to be mapped into CPU virtual memory.
+ * Also make sure the FW address is not already set */
+ PVR_ASSERT(gpsRgxDevInfo->psRGXFWIfRuntimeCfg && gpsRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0);
+
+ /* Meta cached flag removed from this allocation as it was found
+ * FW performance was better without it. */
+ RGXSetFirmwareAddress(&gpsRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+ gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(gpsRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ (void**)&gpsRgxDevInfo->psRGXFWIfHWPerfBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire kernel hwperf buffer (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence,
+ * L2 buffer is not allocated */
+#if !defined(NO_HARDWARE)
+ /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer
+ * accessed by the FW. The MISR may try to write one packet the size of the L1
+ * buffer in some scenarios. When logging is enabled in the MISR, it can be seen
+ * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers
+ * are the more chance of this happening.
+ * Size chosen to allow MISR to write an L1 sized packet and for the client
+ * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1.
+ */
+ ui32L2BufferSize = gpsRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+ (gpsRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
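+ /* For example, a 64 KiB L1 buffer yields a 96 KiB (1.5x) L2 stream. */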
+ eError = TLStreamCreate(&gpsRgxDevInfo->hHWPerfStream, PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ ui32L2BufferSize,
+ TL_FLAG_RESERVE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+ NULL, NULL,
+#if !defined(SUPPORT_TL_PROODUCER_CALLBACK)
+ NULL, NULL
+#else
+ /* Not enabled by default */
+ RGXHWPerfTLCB, gpsRgxDevInfo
+#endif
+ );
+ PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", e1);
+#else /* defined (NO_HARDWARE) */
+ PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+ PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+ ui32L2BufferSize = 0;
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d",
+ gpsRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+ PVR_DPF_RETURN_OK;
+
+#if !defined(NO_HARDWARE)
+e1: /* L2 buffer initialisation failures */
+ gpsRgxDevInfo->hHWPerfStream = NULL;
+#endif
+e0: /* L1 buffer initialisation failures */
+ RGXHWPerfL1BufferDeinit();
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+
+void RGXHWPerfDeinit(void)
+{
+ PVR_DPF_ENTERED;
+
+ /* Clean up the L2 buffer stream object if allocated */
+ if (gpsRgxDevInfo && gpsRgxDevInfo->hHWPerfStream)
+ {
+ TLStreamClose(gpsRgxDevInfo->hHWPerfStream);
+ gpsRgxDevInfo->hHWPerfStream = NULL;
+ }
+
+ /* Cleanup L1 buffer resources */
+ RGXHWPerfL1BufferDeinit();
+
+ /* Cleanup the HWPerf server module lock resource */
+ if (gpsRgxDevInfo && gpsRgxDevInfo->hHWPerfLock)
+ {
+ OSLockDestroy(gpsRgxDevInfo->hHWPerfLock);
+ gpsRgxDevInfo->hHWPerfLock = NULL;
+ }
+
+ PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ /* Whether this method is being used to enable or to disable events,
+ * the HWPerf buffers (host and FW) are likely to be needed eventually,
+ * so create them now; this also helps unit testing. Buffers are
+ * allocated on demand to reduce the RAM footprint on systems that do
+ * not need HWPerf resources.
+ * Obtain the lock first, then test and initialise if required. */
+ OSLockAcquire(psDevice->hHWPerfLock);
+
+ if (!psDevice->bFirmwareInitialised)
+ {
+ gpsRgxDevInfo->ui64HWPerfFilter = ui64Mask; // at least set filter
+ eError = PVRSRV_ERROR_NOT_INITIALISED;
+
+ PVR_DPF((PVR_DBG_ERROR, "HWPerf has NOT been initialised yet."
+ " Mask has been SET to (%llx)", (long long) ui64Mask));
+
+ goto unlock_and_return;
+ }
+
+ if (RGXHWPerfIsInitRequired())
+ {
+ eError = RGXHWPerfInitOnDemandResources();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
+ "resources failed", __func__));
+ goto unlock_and_return;
+ }
+ }
+
+ /* Unlock here as no further HWPerf resources are used below that would be
+ * affected if freed by another thread */
+ OSLockRelease(psDevice->hHWPerfLock);
+
+ /* Return if the filter is the same */
+ if (!bToggle && gpsRgxDevInfo->ui64HWPerfFilter == ui64Mask)
+ goto return_;
+
+ /* Prepare command parameters ... */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+ sKccbCmd.uCmdData.sHWPerfCtrl.bToggle = bToggle;
+ sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+ /* Ask the FW to carry out the HWPerf configuration command */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+ &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
+ "firmware (error = %d)", __func__, eError));
+ goto return_;
+ }
+
+ gpsRgxDevInfo->ui64HWPerfFilter = bToggle ?
+ gpsRgxDevInfo->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+ psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+#if defined(DEBUG)
+ if (bToggle)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%llx) have been TOGGLED",
+ ui64Mask));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+ ui64Mask));
+ }
+#endif
+
+ return PVRSRV_OK;
+
+unlock_and_return:
+ OSLockRelease(psDevice->hHWPerfLock);
+
+return_:
+ return eError;
+}
+
+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bToggle,
+ IMG_UINT32 ui32Mask)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+
+ OSLockAcquire(psDevice->hLockHWPerfHostStream);
+ if (psDevice->hHWPerfHostStream == NULL)
+ {
+ eError = RGXHWPerfHostInitOnDemandResources();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+ " resources failed", __FUNCTION__));
+ OSLockRelease(psDevice->hLockHWPerfHostStream);
+ return eError;
+ }
+ }
+
+ psDevice->ui32HWPerfHostFilter = bToggle ?
+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask;
+ OSLockRelease(psDevice->hLockHWPerfHostStream);
+
+#if defined(DEBUG)
+ if (bToggle)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED",
+ ui32Mask));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)",
+ ui32Mask));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ PVRSRVRGXCtrlHWPerfKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF_ENTERED;
+ PVR_ASSERT(psDeviceNode);
+
+ if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
+ {
+ return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
+ }
+ else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
+ {
+ return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_DPF_RETURN_OK;
+}
+
+/*
+ AppHint interfaces
+*/
+static
+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT64 ui64Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+ return RGXHWPerfCtrlFwBuffer(psDeviceNode, IMG_FALSE, ui64Value);
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT64 *pui64Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevice = psDeviceNode->pvDevice;
+ *pui64Value = psDevice->ui64HWPerfFilter;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+ return RGXHWPerfCtrlHostBuffer(psDeviceNode, IMG_FALSE, ui32Value);
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevice = psDeviceNode->pvDevice;
+ *pui32Value = psDevice->ui32HWPerfHostFilter;
+ return PVRSRV_OK;
+}
+
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
+ RGXHWPerfReadFwFilter,
+ RGXHWPerfSetFwFilter,
+ psDeviceNode,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
+ RGXHWPerfReadHostFilter,
+ RGXHWPerfSetHostFilter,
+ psDeviceNode,
+ NULL);
+}
+
+/*
+ PVRSRVRGXConfigEnableHWPerfCountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32ArrayLen,
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+ DEVMEM_MEMDESC* psFwBlkConfigsMemDesc;
+ RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+ PVR_ASSERT(ui32ArrayLen>0);
+ PVR_ASSERT(psBlockConfigs);
+
+ /* Fill in the command structure with the parameters needed
+ */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+ sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+ eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwHWPerfCountersConfigBlock",
+ &psFwBlkConfigsMemDesc);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+ RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+ psFwBlkConfigsMemDesc, 0, 0);
+
+ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+ }
+
+ OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+ 0,
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+ 0);
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW")); */
+
+ /* Ask the FW to carry out the HWPerf configuration command
+ */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+ }
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW")); */
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+ }
+
+ /* Release temporary memory used for block configuration
+ */
+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed")); */
+
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+ PVR_DPF_RETURN_OK;
+
+fail2:
+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+fail1:
+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+ PVRSRVRGXConfigCustomCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 * pui32CustomCounterIDs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+ DEVMEM_MEMDESC* psFwSelectCntrsMemDesc = NULL;
+ IMG_UINT32* psFwArray;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));
+
+ /* Fill in the command structure with the parameters needed */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+ if (ui16NumCustomCounters > 0)
+ {
+ PVR_ASSERT(pui32CustomCounterIDs);
+
+ eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+ sizeof(IMG_UINT32) * ui16NumCustomCounters,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwHWPerfConfigCustomCounters",
+ &psFwSelectCntrsMemDesc);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+ RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
+ psFwSelectCntrsMemDesc, 0, 0);
+
+ eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+ }
+
+ OSDeviceMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+ DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+ 0,
+ sizeof(IMG_UINT32) * ui16NumCustomCounters,
+ 0);
+ }
+
+ /* Push in the KCCB the command to configure the custom counters block */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+ }
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled"));
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+ }
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed"));
+
+ if (ui16NumCustomCounters > 0)
+ {
+ /* Release temporary memory used for block configuration */
+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters));
+
+ PVR_DPF_RETURN_OK;
+
+ fail2:
+ if (psFwSelectCntrsMemDesc) DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+
+ fail1:
+ if (psFwSelectCntrsMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+/*
+ PVRSRVRGXCtrlHWPerfCountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_BOOL bEnable,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT16 * psBlockIDs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+ PVR_ASSERT(ui32ArrayLen>0);
+ PVR_ASSERT(ui32ArrayLen<=RGXFWIF_HWPERF_CTRL_BLKS_MAX);
+ PVR_ASSERT(psBlockIDs);
+
+ /* Fill in the command structure with the parameters needed
+ */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+ OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16)*ui32ArrayLen);
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */
+
+ /* Ask the FW to carry out the HWPerf configuration command
+ */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */
+
+#if defined(DEBUG)
+ if (bEnable)
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen));
+ else
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen));
+#endif
+
+ PVR_DPF_RETURN_OK;
+}
+
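+/*
+ _RGXHWPerfFixBufferSize
+
+ Clamp the AppHint-supplied HWPerf Host buffer size (in KB) to the supported
+ range and convert it to bytes; a value of zero selects the driver default.
+*/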
+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
+{
+ if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+ {
+ /* Size specified as an AppHint but it is too big */
+ PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+ "value (%u) too big, using maximum (%u)", ui32BufSizeKB,
+ HWPERF_HOST_TL_STREAM_SIZE_MAX));
+ return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
+ }
+ else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
+ {
+ return ui32BufSizeKB<<10;
+ }
+ else if (ui32BufSizeKB > 0)
+ {
+ /* Size specified as an AppHint but it is too small */
+ PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+ "value (%u) too small, using minimum (%u)", ui32BufSizeKB,
+ HWPERF_HOST_TL_STREAM_SIZE_MIN));
+ return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
+ }
+ else
+ {
+ /* 0 size implies AppHint not set or is set to zero,
+ * use default size from driver constant. */
+ return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
+ }
+}
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfHostInit
+
+@Description Called during driver init for initialisation of HWPerfHost
+ stream in the Rogue device driver. Only the minimal resources
+ required for the HWPerf server module to function are
+ allocated here.
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInit(IMG_UINT32 ui32BufSizeKB)
+{
+ PVRSRV_ERROR eError;
+ PVR_ASSERT(gpsRgxDevInfo != NULL);
+
+ eError = OSLockCreate(&gpsRgxDevInfo->hLockHWPerfHostStream, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", error);
+
+ gpsRgxDevInfo->hHWPerfHostStream = NULL;
+ gpsRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
+ gpsRgxDevInfo->ui32HWPerfHostNextOrdinal = 0;
+ gpsRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
+
+error:
+ return eError;
+}
+
+static void _HWPerfHostOnConnectCB(void *pvArg)
+{
+ (void) pvArg;
+
+ RGX_HWPERF_HOST_CLK_SYNC();
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfHostInitOnDemandResources
+
+@Description This function allocates the HWPerfHost buffer. It is called at
+ driver load time if HWPerf is enabled then; otherwise it is
+ called on demand when the buffer is first required.
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = TLStreamCreate(&gpsRgxDevInfo->hHWPerfHostStream,
+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, gpsRgxDevInfo->ui32HWPerfHostBufSize,
+ TL_FLAG_RESERVE_DROP_NEWER, _HWPerfHostOnConnectCB, NULL, NULL,
+ NULL);
+ PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", error_stream_create);
+
+ PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB",
+ gpsRgxDevInfo->ui32HWPerfHostBufSize));
+
+ return PVRSRV_OK;
+
+error_stream_create:
+ OSLockDestroy(gpsRgxDevInfo->hLockHWPerfHostStream);
+ gpsRgxDevInfo->hLockHWPerfHostStream = NULL;
+
+ return eError;
+}
+
+void RGXHWPerfHostDeInit(void)
+{
+ if (gpsRgxDevInfo && gpsRgxDevInfo->hHWPerfHostStream)
+ {
+ TLStreamClose(gpsRgxDevInfo->hHWPerfHostStream);
+ gpsRgxDevInfo->hHWPerfHostStream = NULL;
+ }
+
+ if (gpsRgxDevInfo && gpsRgxDevInfo->hLockHWPerfHostStream)
+ {
+ OSLockDestroy(gpsRgxDevInfo->hLockHWPerfHostStream);
+ gpsRgxDevInfo->hLockHWPerfHostStream = NULL;
+ }
+
+ /* Clear global RGX device reference */
+ gpsRgxDevInfo = NULL;
+ gpsRgxDevNode = NULL;
+}
+
+void RGXHWPerfHostSetEventFilter(IMG_UINT32 ui32Filter)
+{
+ gpsRgxDevInfo->ui32HWPerfHostFilter = ui32Filter;
+}
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_EVENT_TYPE eEvent)
+{
+ return gpsRgxDevInfo != NULL &&
+ (gpsRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent));
+}
+
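+/* The prologue/epilogue below bracket every host event post: the prologue
+ * takes the host stream lock and advances the packet ordinal before the
+ * packet is written, the epilogue releases the lock. */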
+static inline void _PostFunctionPrologue(void)
+{
+ PVR_ASSERT(gpsRgxDevInfo->hLockHWPerfHostStream != NULL);
+ PVR_ASSERT(gpsRgxDevInfo->hHWPerfHostStream != NULL);
+
+ OSLockAcquire(gpsRgxDevInfo->hLockHWPerfHostStream);
+
+ /* The ordinal is incremented beforehand so that it still advances if the packet is dropped. */
+ gpsRgxDevInfo->ui32HWPerfHostNextOrdinal++;
+}
+
+static inline void _PostFunctionEpilogue(void)
+{
+ OSLockRelease(gpsRgxDevInfo->hLockHWPerfHostStream);
+}
+
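+/* Reserve ui32Size bytes in the HWPerf Host TL stream. Returns NULL (and the
+ * caller drops the packet) when the space cannot be reserved. */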
+static inline IMG_UINT8 *_ReserveHWPerfStream(IMG_UINT32 ui32Size)
+{
+ IMG_UINT8 *pui8Dest;
+
+ PVRSRV_ERROR eError = TLStreamReserve(gpsRgxDevInfo->hHWPerfHostStream,
+ &pui8Dest, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
+ " (%d). Dropping packet.",
+ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+ return NULL;
+ }
+ PVR_ASSERT(pui8Dest != NULL);
+
+ return pui8Dest;
+}
+
+static inline void _CommitHWPerfStream(IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError = TLStreamCommit(gpsRgxDevInfo->hHWPerfHostStream,
+ ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
+ " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+ }
+}
+
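+/* Fill in the common V2 packet header: ordinal, timestamp, signature,
+ * type (host stream ID and event type) and total packet size. */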
+static inline void _SetupHostPacketHeader(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+ IMG_UINT32 ui32Size)
+{
+ RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) pui8Dest;
+
+ PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE);
+
+ psHeader->ui32Ordinal = gpsRgxDevInfo->ui32HWPerfHostNextOrdinal;
+ psHeader->ui64Timestamp = RGXGPUFreqCalibrateClockus64();
+ psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
+ psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
+ eEvType, 0, 0);
+ psHeader->ui32Size = ui32Size;
+}
+
+static inline IMG_UINT32 _CalculateHostCtrlPacketSize(
+ RGX_HWPERF_HOST_CTRL_TYPE eCtrlType)
+{
+ RGX_HWPERF_HOST_CTRL_DATA *psData;
+ IMG_UINT32 ui32Size = sizeof(psData->eEvType);
+
+ switch (eCtrlType)
+ {
+ case RGX_HWPERF_CTRL_TYPE_CLIENT_STREAM_OPEN:
+ case RGX_HWPERF_CTRL_TYPE_CLIENT_STREAM_CLOSE:
+ ui32Size += sizeof(psData->uData.ui32Pid);
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostCtrlEvent: Invalid alloc"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostCtrlPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_CTRL_TYPE eEvType,
+ IMG_UINT32 ui32Pid)
+{
+ RGX_HWPERF_HOST_CTRL_DATA *psData = (RGX_HWPERF_HOST_CTRL_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ psData->eEvType = eEvType;
+ psData->uData.ui32Pid = ui32Pid;
+}
+
+void RGXHWPerfHostPostCtrlEvent(RGX_HWPERF_HOST_CTRL_TYPE eEvType,
+ IMG_UINT32 ui32Pid)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = _CalculateHostCtrlPacketSize(eEvType);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_CTRL, ui32Size);
+ _SetupHostCtrlPacketData(pui8Dest, eEvType, ui32Pid);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef)
+{
+ RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ psData->ui32EnqType = eEnqType;
+ psData->ui32PID = ui32Pid;
+ psData->ui32ExtJobRef = ui32ExtJobRef;
+ psData->ui32IntJobRef = ui32IntJobRef;
+ psData->ui32DMContext = ui32FWDMContext;
+ psData->ui32Padding = 0; /* Set to zero for future compatibility */
+}
+
+void RGXHWPerfHostPostEnqEvent(RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size);
+ _SetupHostEnqPacketData(pui8Dest, eEnqType, ui32Pid, ui32FWDMContext,
+ ui32ExtJobRef, ui32IntJobRef);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
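+/* Compute the size of a host UFO packet: the fixed fields of the UFO data
+ * structure plus one payload element per UFO, sized according to the event
+ * type. */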
+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType,
+ IMG_UINT uiNoOfUFOs)
+{
+ IMG_UINT32 ui32Size =
+ (IMG_UINT32) offsetof(RGX_HWPERF_UFO_DATA, aui32StreamData);
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+ switch (eUfoType)
+ {
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ ui32Size += uiNoOfUFOs * sizeof(puData->sCheckSuccess);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ ui32Size += uiNoOfUFOs * sizeof(puData->sCheckFail);
+ break;
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ ui32Size += uiNoOfUFOs * sizeof(puData->sUpdate);
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+ IMG_UINT uiNoOfUFOs)
+{
+ IMG_UINT uiUFOIdx;
+ RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ psData->aui32StreamData;
+
+ psData->eEvType = eUfoType;
+ psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(uiNoOfUFOs,
+ offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData));
+
+ switch (eUfoType)
+ {
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+ {
+ puData->sCheckSuccess.ui32FWAddr =
+ psUFOData[uiUFOIdx].sCheckSuccess.ui32FWAddr;
+ puData->sCheckSuccess.ui32Value =
+ psUFOData[uiUFOIdx].sCheckSuccess.ui32Value;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sCheckSuccess));
+ }
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+ {
+ puData->sCheckFail.ui32FWAddr =
+ psUFOData[uiUFOIdx].sCheckFail.ui32FWAddr;
+ puData->sCheckFail.ui32Value =
+ psUFOData[uiUFOIdx].sCheckFail.ui32Value;
+ puData->sCheckFail.ui32Required =
+ psUFOData[uiUFOIdx].sCheckFail.ui32Required;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sCheckFail));
+ }
+ break;
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+ {
+ puData->sUpdate.ui32FWAddr =
+ psUFOData[uiUFOIdx].sUpdate.ui32FWAddr;
+ puData->sUpdate.ui32OldValue =
+ psUFOData[uiUFOIdx].sUpdate.ui32OldValue;
+ puData->sUpdate.ui32NewValue =
+ psUFOData[uiUFOIdx].sUpdate.ui32NewValue;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sUpdate));
+ }
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+}
+
+void RGXHWPerfHostPostUfoEvent(RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+ IMG_UINT uiNoOfUFOs)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType, uiNoOfUFOs);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size);
+ _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData, uiNoOfUFOs);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
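+/* Trim the supplied resource name (drop an existing terminator and cap the
+ * length at SYNC_MAX_CLASS_NAME_LEN - 1), then compute the resulting ALLOC
+ * packet size. */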
+static inline IMG_UINT32 _FixNameSizeAndCalculateHostAllocPacketSize(
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ const IMG_CHAR *psName,
+ IMG_UINT32 *ui32NameSize)
+{
+ RGX_HWPERF_HOST_ALLOC_DATA *psData;
+ RGX_HWPERF_HOST_ALLOC_DETAIL *puData;
+ IMG_UINT32 ui32Size = sizeof(psData->ui32AllocType);
+
+ /* first strip the terminator */
+ if (psName[*ui32NameSize - 1] == '\0')
+ *ui32NameSize -= 1;
+ /* if the string is longer than the maximum, truncate it (leaving space for '\0') */
+ if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ *ui32NameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+
+ switch (eAllocType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+ ui32Size += sizeof(puData->sSyncAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize + 1; /* +1 for '\0' */
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostAllocEvent: Invalid alloc"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ psData->ui32AllocType = eAllocType;
+ psData->uAllocDetail.sSyncAlloc.ui32FWAddr = ui32FWAddr;
+ OSStringNCopy(psData->uAllocDetail.sSyncAlloc.acName, psName,
+ ui32NameSize);
+ /* the copied string is not null terminated at this point and there is
+ * enough space left for the terminator */
+ psData->uAllocDetail.sSyncAlloc.acName[ui32NameSize] = '\0';
+}
+
+void RGXHWPerfHostPostAllocEvent(RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size =
+ _FixNameSizeAndCalculateHostAllocPacketSize(eAllocType, psName,
+ &ui32NameSize);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size);
+ _SetupHostAllocPacketData(pui8Dest, eAllocType, ui32FWAddr, psName,
+ ui32NameSize);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT32 ui32FWAddr)
+{
+ RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ psData->ui32FreeType = eFreeType;
+ psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr;
+}
+
+void RGXHWPerfHostPostFreeEvent(RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT32 ui32FWAddr)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size);
+ _SetupHostFreePacketData(pui8Dest, eFreeType, ui32FWAddr);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
+static inline void _SetupHostClkSyncPacketData(IMG_UINT8 *pui8Dest)
+{
+ RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = gpsRgxDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_UINT32 ui32CurrIdx =
+ RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+
+ psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp;
+ psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp;
+ psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed;
+}
+
+void RGXHWPerfHostPostClkSyncEvent(void)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size =
+ RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA);
+
+ _PostFunctionPrologue();
+
+ if ((pui8Dest = _ReserveHWPerfStream(ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size);
+ _SetupHostClkSyncPacketData(pui8Dest);
+
+ _CommitHWPerfStream(ui32Size);
+
+cleanup:
+ _PostFunctionEpilogue();
+}
+
+/******************************************************************************
+ * SUPPORT_GPUTRACE_EVENTS
+ *
+ * Currently only implemented on Linux and Android. The feature can be
+ * enabled on Android builds, and also on Linux builds for testing,
+ * provided the gpu.h FTrace event header file is present.
+ *****************************************************************************/
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+static void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+ /* This lock ensures the HWPerf TL stream reading resources are not destroyed
+ * by one thread disabling tracing while another is reading from the stream.
+ * It keeps the state and resource create/destroy atomic and consistent. */
+ POS_LOCK hFTraceLock;
+ IMG_HANDLE hGPUTraceCmdCompleteHandle;
+ IMG_HANDLE hGPUTraceTLStream;
+ IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp;
+ IMG_UINT32 ui32FTraceLastOrdinal;
+ /* This lock ensures that the reference counting operation on the FTrace UFO
+ * events and enable/disable operation on firmware event are performed as
+ * one atomic operation. This should ensure that there are no race conditions
+ * between reference counting and firmware event state change.
+ * See below comment for g_uiUfoEventRef.
+ */
+ POS_LOCK hLockFTraceEventLock;
+ /* Multiple FTrace UFO events map to a single firmware event. When an
+ * FTrace UFO event is enabled, the firmware event must be enabled at the
+ * same time. Because of this many-to-one relation we count how many FTrace
+ * UFO events are enabled: the firmware event is enabled while at least one
+ * FTrace UFO event is enabled, and disabled once all of them are disabled. */
+ IMG_UINT uiUfoEventRef;
+ /* Saved value of the clock source before the trace was enabled. It is kept
+ * here so that we know which clock to select again after GPU ftrace is
+ * disabled. */
+ IMG_UINT64 ui64LastTimeCorrClock;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* The caller must hold hFTraceLock before calling this function.
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUEnable(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(gpsRgxDevNode && gpsRgxDevInfo);
+
+ psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceLock));
+
+	/* In the case where the AppHint has not been set we need to
+	 * initialise the host driver HWPerf resources here. They are allocated
+	 * on demand to reduce the RAM footprint on systems not needing HWPerf.
+	 * Signal the FW to enable event generation.
+	 */
+ if (gpsRgxDevInfo->bFirmwareInitialised)
+ {
+ IMG_UINT64 ui64UFOFilter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) &
+ gpsRgxDevInfo->ui64HWPerfFilter;
+
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode,
+		                               RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+ ui64UFOFilter);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+ }
+ else
+ {
+ /* only set filter and exit */
+ gpsRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+ (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) &
+ gpsRgxDevInfo->ui64HWPerfFilter);
+ PVRGpuTraceSetPreEnabled(IMG_TRUE);
+
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+ (long long) gpsRgxDevInfo->ui64HWPerfFilter));
+
+ return PVRSRV_OK;
+ }
+
+ /* Open the TL Stream for HWPerf data consumption */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+ &psFtraceData->hGPUTraceTLStream);
+ PVR_LOGG_IF_ERROR(eError, "TLClientOpenStream", err_out);
+
+ /* Set clock source for timer correlation data to sched_clock */
+ psFtraceData->ui64LastTimeCorrClock = RGXGPUFreqCalibrateGetClockSource();
+ RGXGPUFreqCalibrateSetClockSource(gpsRgxDevNode, RGXTIMECORR_CLOCK_SCHED);
+
+ /* Reset the OS timestamp coming from the timer correlation data
+ * associated with the latest HWPerf event we processed.
+ */
+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+ PVRGpuTraceSetEnabled(IMG_TRUE);
+
+ /* Register a notifier to collect HWPerf data whenever the HW completes
+ * an operation.
+ */
+ eError = PVRSRVRegisterCmdCompleteNotify(
+ &psFtraceData->hGPUTraceCmdCompleteHandle,
+ &RGXHWPerfFTraceCmdCompleteNotify,
+ gpsRgxDevInfo);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+err_out:
+ PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+ PVRGpuTraceSetEnabled(IMG_FALSE);
+
+ TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream);
+ psFtraceData->hGPUTraceTLStream = 0;
+ goto err_out;
+}
+
+/* The caller must hold hFTraceLock before calling this function
+ * (not asserted when bDeInit is set, i.e. on driver de-initialisation).
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUDisable(IMG_BOOL bDeInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(gpsRgxDevNode && gpsRgxDevInfo);
+
+ psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ PVRGpuTraceSetEnabled(IMG_FALSE);
+ PVRGpuTraceSetPreEnabled(IMG_FALSE);
+
+ if (!bDeInit)
+ {
+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceLock));
+
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode, RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, (RGX_HWPERF_EVENT_MASK_NONE));
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+ }
+
+ if (psFtraceData->hGPUTraceCmdCompleteHandle)
+ {
+ /* Tracing is being turned off. Unregister the notifier. */
+ eError = PVRSRVUnregisterCmdCompleteNotify(
+ psFtraceData->hGPUTraceCmdCompleteHandle);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+ psFtraceData->hGPUTraceCmdCompleteHandle = NULL;
+ }
+
+ if (psFtraceData->hGPUTraceTLStream)
+ {
+ IMG_PBYTE pbTmp = NULL;
+ IMG_UINT32 ui32Tmp = 0;
+
+ /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+ * are some events left unprocessed in this FTrace/systrace "session"
+ * (note that even if we have just disabled HWPerf on the FW some packets
+ * could have been generated and already copied to L2 by the MISR handler).
+ *
+ * With the following calls we will both copy new data to the Host buffer
+ * (done by the producer callback in TLClientAcquireData) and advance
+ * the read offset in the buffer to catch up with the latest events.
+ */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream,
+ &pbTmp, &ui32Tmp);
+		PVR_LOG_IF_ERROR(eError, "TLClientAcquireData");
+
+ /* Let close stream perform the release data on the outstanding acquired data */
+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream);
+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+ psFtraceData->hGPUTraceTLStream = NULL;
+ }
+
+ if (psFtraceData->ui64LastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED)
+ {
+ RGXGPUFreqCalibrateSetClockSource(gpsRgxDevNode,
+ psFtraceData->ui64LastTimeCorrClock);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(IMG_BOOL bNewValue)
+{
+ IMG_BOOL bOldValue;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(gpsRgxDevNode && gpsRgxDevInfo);
+
+ psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ /* About to create/destroy FTrace resources, lock critical section
+ * to avoid HWPerf MISR thread contention.
+ */
+ OSLockAcquire(psFtraceData->hFTraceLock);
+
+ bOldValue = PVRGpuTraceEnabled();
+
+ if (bOldValue != bNewValue)
+ {
+ if (bNewValue)
+ {
+ eError = RGXHWPerfFTraceGPUEnable();
+ }
+ else
+ {
+ eError = RGXHWPerfFTraceGPUDisable(IMG_FALSE);
+ }
+ }
+
+ OSLockRelease(psFtraceData->hFTraceLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* This entry point from DebugFS must take the global
+ * bridge lock at this outer level of the stack before calling
+ * into the RGX part of the driver which can lead to RGX
+ * device data changes and communication with the FW which
+ * all requires the bridge lock.
+ */
+ OSAcquireBridgeLock();
+ eError = RGXHWPerfFTraceGPUEventsEnabledSet(bNewValue);
+ OSReleaseBridgeLock();
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
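+/* The firmware periodically publishes time correlation samples (a CR timer
+ * snapshot, the matching OS timestamp and a pre-computed CR-to-OS delta
+ * scaling factor). The OS time of an event is reconstructed by taking the CR
+ * timer delta from the sample referenced by the packet and converting it to
+ * nanoseconds with that scaling factor.
+ */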
+static uint64_t
+CalculateEventTimestamp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ uint32_t ui32TimeCorrIndex,
+ uint64_t ui64EventTimestamp)
+{
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex];
+ uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp;
+ uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp;
+ uint32_t ui32CRDeltaToOSDeltaKNs = psTimeCorr->ui32CRDeltaToOSDeltaKNs;
+ uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+ if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+ {
+ /* The previous packet had a time reference (time correlation data) more
+ * recent than the one in the current packet, it means the timer
+ * correlation array wrapped too quickly (buffer too small) and in the
+ * previous call to RGXHWPerfFTraceGPUUfoEvent we read one of the
+ * newest timer correlations rather than one of the oldest ones.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be "
+ "wrong! The time correlation array size should be increased "
+ "to avoid this.", __func__));
+ }
+
+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+ /* RGX CR timer ticks delta */
+ deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp;
+ /* RGX time delta in nanoseconds */
+ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui32CRDeltaToOSDeltaKNs);
+ /* Calculate OS time of HWPerf event */
+ ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+ __func__, ui64CRTimeStamp, ui64OSTimeStamp,
+ psTimeCorr->ui32CoreClockSpeed));
+
+ return ui64EventOSTimestamp;
+}
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CtxId, IMG_UINT32 ui32JobId,
+ RGX_HWPERF_KICK_TYPE eKickType)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUEnqueueEvent: ui32CtxId %u, "
+ "ui32JobId %u", ui32CtxId, ui32JobId));
+
+ PVRGpuTraceClientWork(ui32CtxId, ui32JobId,
+ RGXHWPerfKickTypeToStr(eKickType));
+
+ PVR_DPF_RETURN;
+}
+
+
+static void RGXHWPerfFTraceGPUSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ IMG_UINT64 ui64Timestamp;
+ RGX_HWPERF_HW_DATA_FIELDS* psHWPerfPktData;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psHWPerfPkt);
+ PVR_ASSERT(pszWorkName);
+
+ psHWPerfPktData = (RGX_HWPERF_HW_DATA_FIELDS*) RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUSwitchEvent: %s ui32DMContext=%d, ui32IntJobRef=%d, eSwType=%d",
+			pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+ PVRGpuTraceWorkSwitch(ui64Timestamp, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32CtxPriority,
+ psHWPerfPktData->ui32IntJobRef, pszWorkName, eSwType);
+
+ PVR_DPF_RETURN;
+}
+
+static void RGXHWPerfFTraceGPUUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+ IMG_UINT64 ui64Timestamp;
+ RGX_HWPERF_UFO_DATA *psHWPerfPktData;
+ IMG_UINT32 ui32UFOCount;
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+ psHWPerfPktData = (RGX_HWPERF_UFO_DATA *)
+ RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo);
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) psHWPerfPktData)
+ + RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo));
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUUfoEvent: ui32ExtJobRef=%d, "
+ "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef,
+ psHWPerfPktData->ui32IntJobRef));
+
+ PVRGpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+ psHWPerfPktData->ui32ExtJobRef, psHWPerfPktData->ui32DMContext,
+ psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
+}
+
+static void RGXHWPerfFTraceGPUFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+
+{
+ uint64_t ui64Timestamp;
+ RGX_HWPERF_FW_DATA *psHWPerfPktData = (RGX_HWPERF_FW_DATA *)
+ RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+ PVRGpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+}
+
+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+ RGX_HWPERF_EVENT_TYPE eType;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+ IMG_UINT32 ui32HwEventTypeIndex;
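+	/* Table mapping FW/HW event types to a trace name and switch type. The
+	 * entries must stay in the same order as the corresponding
+	 * RGX_HWPERF_EVENT_TYPE values; the static_assert below guards the
+	 * assumption that the FW and HW event ranges are contiguous. */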
+ static const struct {
+ IMG_CHAR* pszName;
+ PVR_GPUTRACE_SWITCH_TYPE eSwType;
+ } aszHwEventTypeMap[] = {
+ { /* RGX_HWPERF_FW_BGSTART */ "BG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_BGEND */ "BG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_FW_IRQSTART */ "IRQ", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_IRQEND */ "IRQ", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_FW_DBGSTART */ "DBG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_DBGEND */ "DBG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ "PMOOM_TAPAUSE", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_TAKICK */ "TA", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TAFINISHED */ "TA", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DTQKICK */ "TQ3D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_3DKICK */ "3D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_3DFINISHED */ "3D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_CDMKICK */ "CDM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_CDMFINISHED */ "CDM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_TLAKICK */ "TQ2D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TLAFINISHED */ "TQ2D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DSPMKICK */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_PERIODIC */ NULL, 0 }, /* PERIODIC not supported */
+ { /* RGX_HWPERF_HW_RTUKICK */ "RTU", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_RTUFINISHED */ "RTU", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_SHGKICK */ "SHG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_SHGFINISHED */ "SHG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DTQFINISHED */ "TQ3D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DSPMFINISHED */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_PMOOM_TARESUME */ "PMOOM_TARESUME", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TDMKICK */ "TDM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TDMFINISHED */ "TDM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ };
+ static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1,
+ "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE");
+
+ PVR_ASSERT(psHWPerfPkt);
+ eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+
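+	/* HWPerf packets carry a monotonically increasing ordinal. A gap between
+	 * the last ordinal processed and the current one means packets were lost
+	 * somewhere between the firmware buffer and this reader, so report it. */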
+ if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+ {
+ RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+ PVRGpuTraceEventsLost(eStreamId,
+ psFtraceData->ui32FTraceLastOrdinal,
+ psHWPerfPkt->ui32Ordinal);
+ PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+ eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+ }
+
+ psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
+
+ /* Process UFO packets */
+ if (eType == RGX_HWPERF_UFO)
+ {
+ RGXHWPerfFTraceGPUUfoEvent(psDevInfo, psHWPerfPkt);
+ return IMG_TRUE;
+ }
+
+ if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE)
+ {
+ /* this ID belongs to range 0, so index directly in range 0 */
+ ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ }
+ else
+ {
+ /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */
+ ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) +
+ (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1);
+ }
+
+ if (ui32HwEventTypeIndex >= IMG_ARR_NUM_ELEMS(aszHwEventTypeMap))
+ goto err_unsupported;
+
+ if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL)
+ {
+ /* Not supported map entry, ignore event */
+ goto err_unsupported;
+ }
+
+ if (HWPERF_PACKET_IS_HW_TYPE(eType))
+ {
+ RGXHWPerfFTraceGPUSwitchEvent(psDevInfo, psHWPerfPkt,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+ }
+ else if (HWPERF_PACKET_IS_FW_TYPE(eType))
+ {
+ RGXHWPerfFTraceGPUFirmwareEvent(psDevInfo, psHWPerfPkt,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+ }
+ else
+ {
+ goto err_unsupported;
+ }
+
+ return IMG_TRUE;
+
+err_unsupported:
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType));
+ return IMG_FALSE;
+}
+
+
+static void RGXHWPerfFTraceGPUProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_PBYTE pBuffer, IMG_UINT32 ui32ReadLen)
+{
+ IMG_UINT32 ui32TlPackets = 0;
+ IMG_UINT32 ui32HWPerfPackets = 0;
+ IMG_UINT32 ui32HWPerfPacketsSent = 0;
+ IMG_PBYTE pBufferEnd;
+ PVRSRVTL_PPACKETHDR psHDRptr;
+ PVRSRVTL_PACKETTYPE ui16TlType;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDevInfo);
+ PVR_ASSERT(pBuffer);
+ PVR_ASSERT(ui32ReadLen);
+
+ /* Process the TL Packets
+ */
+ pBufferEnd = pBuffer+ui32ReadLen;
+ psHDRptr = GET_PACKET_HDR(pBuffer);
+ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+ {
+ ui16TlType = GET_PACKET_TYPE(psHDRptr);
+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+ {
+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+ if (0 == ui16DataLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfFTraceGPUProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+ }
+ else
+ {
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+
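+				/* A single TL DATA packet can carry several HWPerf packets
+				 * back to back; walk all of them within the payload. */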
+ /* Check for lost hwperf data packets */
+ psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+ psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+ do
+ {
+ if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt))
+ {
+ ui32HWPerfPacketsSent++;
+ }
+ ui32HWPerfPackets++;
+ psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+ }
+ while (psHWPerfPkt < psHWPerfEnd);
+ }
+ }
+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Indication that the transport buffer was full"));
+ }
+ else
+ {
+ /* else Ignore padding packet type and others */
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Ignoring TL packet, type %d", ui16TlType ));
+ }
+
+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+ ui32TlPackets++;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUProcessPackets: TL "
+ "Packets processed %03d, HWPerf packets %03d, sent %03d",
+ ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+ PVR_DPF_RETURN;
+}
+
+
+static
+void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+ RGX_HWPERF_FTRACE_DATA* psFtraceData;
+ PVRSRV_ERROR eError;
+ IMG_PBYTE pBuffer;
+ IMG_UINT32 ui32ReadLen;
+
+ PVR_DPF_ENTERED;
+
+	/* Exit if no HWPerf enabled device exists */
+ PVR_ASSERT(psDeviceInfo != NULL &&
+ psPVRSRVData != NULL &&
+ gpsRgxDevInfo != NULL);
+
+ psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ /* Command-complete notifiers can run concurrently. If this is
+ * happening, just bail out and let the previous call finish.
+ * This is ok because we can process the queued packets on the next call.
+ */
+ if (!PVRGpuTraceEnabled() || !(OSTryLockAcquire(psFtraceData->hFTraceLock)))
+ {
+ PVR_DPF_RETURN;
+ }
+
+	/* PVRGpuTraceSetEnabled() is called and hGPUTraceTLStream is set
+	 * atomically while holding hFTraceLock, so it is sufficient to assert
+	 * the stream handle here.
+	 */
+ PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+
+ /* If we have a valid stream attempt to acquire some data */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
+ if (eError == PVRSRV_OK)
+ {
+ /* Process the HWPerf packets and release the data */
+ if (ui32ReadLen > 0)
+ {
+			PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceCmdCompleteNotify: DATA AVAILABLE buffer=%p, length=%d", pBuffer, ui32ReadLen));
+
+ /* Process the transport layer data for HWPerf packets... */
+ RGXHWPerfFTraceGPUProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+				/* Serious error, disable FTrace GPU events */
+
+				/* Release the trace lock first so that the locks are always
+				 * taken in the order BridgeLock -> TraceLock, preventing
+				 * AB-BA deadlocks. */
+ OSLockRelease(psFtraceData->hFTraceLock);
+ OSAcquireBridgeLock();
+ OSLockAcquire(psFtraceData->hFTraceLock);
+ RGXHWPerfFTraceGPUDisable(IMG_FALSE);
+ OSLockRelease(psFtraceData->hFTraceLock);
+ OSReleaseBridgeLock();
+ goto out;
+
+ }
+ } /* else no data, ignore */
+ }
+ else if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_LOG_ERROR(eError, "TLClientAcquireData");
+ }
+
+ OSLockRelease(psFtraceData->hFTraceLock);
+out:
+ PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_HWPERF_FTRACE_DATA *psData = OSAllocMem(sizeof(RGX_HWPERF_FTRACE_DATA));
+ if (psData == NULL)
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+ /* We initialise it only once because we want to track if any
+ * packets were dropped. */
+ psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1;
+
+ eError = OSLockCreate(&psData->hFTraceLock, LOCK_TYPE_DISPATCH);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+ eError = OSLockCreate(&psData->hLockFTraceEventLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e1);
+
+ psData->uiUfoEventRef = 0;
+
+ psDevInfo->pvGpuFtraceData = psData;
+
+ return PVRSRV_OK;
+
+e1:
+ OSLockDestroy(psData->hFTraceLock);
+e0:
+ OSFreeMem(psData);
+
+ return eError;
+}
+
+void RGXHWPerfFTraceGPUDeInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData;
+
+ OSLockDestroy(psData->hFTraceLock);
+ OSLockDestroy(psData->hLockFTraceEventLock);
+ OSFreeMem(psDevInfo->pvGpuFtraceData);
+}
+
+void PVRGpuTraceEnableUfoCallback(void)
+{
+ PVRSRV_ERROR eError;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ OSLockAcquire(psFtraceData->hLockFTraceEventLock);
+
+ if (psFtraceData->uiUfoEventRef++ == 0)
+ {
+ IMG_UINT64 ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) |
+ gpsRgxDevInfo->ui64HWPerfFilter;
+		/* There is a small chance that ui64HWPerfFilter is changed concurrently
+		 * here, in which case the filter written below would be the old value
+		 * plus the UFO event. This is not a critical problem. */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter);
+ if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+ {
+			/* If we land here it means that the FW is not initialised yet.
+			 * The filter has been stored and will be passed to the firmware
+			 * during its initialisation phase, so this can be ignored. */
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf event."));
+ }
+ }
+
+ OSLockRelease(psFtraceData->hLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableUfoCallback(void)
+{
+ PVRSRV_ERROR eError;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+	/* We have to check that the lock is valid because, on driver unload,
+	 * RGXHWPerfFTraceGPUDeInit is called before the kernel disables the ftrace
+	 * events. This means that the lock will have been destroyed before this
+	 * callback is called.
+	 * We can safely return in that situation because the driver is being
+	 * unloaded, so we no longer care about the HWPerf state. */
+ if (gpsRgxDevInfo == NULL || gpsRgxDevInfo->pvGpuFtraceData == NULL)
+ return;
+
+ psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+
+ OSLockAcquire(psFtraceData->hLockFTraceEventLock);
+
+ if (--psFtraceData->uiUfoEventRef == 0)
+ {
+ IMG_UINT64 ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) &
+ gpsRgxDevInfo->ui64HWPerfFilter;
+		/* There is a small chance that ui64HWPerfFilter is changed concurrently
+		 * here, in which case the filter written below would be the old value
+		 * without the UFO event. This is not a critical problem. */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter);
+ if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+ {
+			/* If we land here it means that the FW is not initialised yet.
+			 * The filter has been stored and will be passed to the firmware
+			 * during its initialisation phase, so this can be ignored. */
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf event."));
+ }
+ }
+
+ OSLockRelease(psFtraceData->hLockFTraceEventLock);
+}
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void)
+{
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+ uint64_t ui64Filter;
+ int i;
+
+ OSLockAcquire(psFtraceData->hLockFTraceEventLock);
+
+ ui64Filter = gpsRgxDevInfo->ui64HWPerfFilter;
+
+ /* Enable all FW events. */
+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE;
+ i++)
+ {
+ ui64Filter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
+ }
+
+ if (PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware task timings."));
+ }
+
+ OSLockRelease(psFtraceData->hLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableFirmwareActivityCallback(void)
+{
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = gpsRgxDevInfo->pvGpuFtraceData;
+ uint64_t ui64Filter;
+ int i;
+
+ if (!psFtraceData)
+ return;
+
+ OSLockAcquire(psFtraceData->hLockFTraceEventLock);
+
+ ui64Filter = gpsRgxDevInfo->ui64HWPerfFilter;
+
+ /* Disable all FW events. */
+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE;
+ i++)
+ {
+ ui64Filter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
+ }
+
+ if (PVRSRVRGXCtrlHWPerfKM(NULL, gpsRgxDevNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
+ }
+
+ OSLockRelease(psFtraceData->hLockFTraceEventLock);
+}
+
+#endif /* SUPPORT_GPUTRACE_EVENTS */
+
+/******************************************************************************
+ * Currently only implemented on Linux. Feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+ PVRSRV_DEVICE_NODE* psRgxDevNode;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+ /* TL Open/close state */
+ IMG_HANDLE hSD[RGX_HWPERF_STREAM_ID_LAST];
+
+ /* TL Acquire/release state */
+ IMG_PBYTE pHwpBuf[RGX_HWPERF_STREAM_ID_LAST];
+ IMG_UINT32 ui32HwpBufLen[RGX_HWPERF_STREAM_ID_LAST];
+
+} RGX_KM_HWPERF_DEVDATA;
+
+
+PVRSRV_ERROR RGXHWPerfLazyConnect(
+ IMG_HANDLE* phDevData)
+{
+ RGX_KM_HWPERF_DEVDATA* psDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!phDevData)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Clear the handle to aid error checking by caller */
+ *phDevData = NULL;
+
+ /* Check the HWPerf module is initialised before we allow a connection */
+ if (!gpsRgxDevNode || !gpsRgxDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+	/* Allocate the session object for this connection */
+ psDevData = OSAllocZMem(sizeof(*psDevData));
+ if (psDevData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psDevData->psRgxDevNode = gpsRgxDevNode;
+ psDevData->psRgxDevInfo = gpsRgxDevInfo;
+
+ *phDevData = psDevData;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfOpen(
+ IMG_HANDLE hDevData)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*) hDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Check the HWPerf module is initialised before we allow a connection */
+ if (!psDevData->psRgxDevNode || !psDevData->psRgxDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ /* In the case where the AppHint has not been set we need to
+ * initialise the HWPerf resources here. Allocated on-demand
+ * to reduce RAM foot print on systems not needing HWPerf.
+ */
+ OSLockAcquire(gpsRgxDevInfo->hHWPerfLock);
+ if (RGXHWPerfIsInitRequired())
+ {
+ eError = RGXHWPerfInitOnDemandResources();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfFW"
+			        " resources failed", __func__));
+ OSLockRelease(gpsRgxDevInfo->hHWPerfLock);
+ goto e0;
+ }
+ }
+ OSLockRelease(gpsRgxDevInfo->hHWPerfLock);
+
+ OSLockAcquire(gpsRgxDevInfo->hLockHWPerfHostStream);
+ if (gpsRgxDevInfo->hHWPerfHostStream == NULL)
+ {
+ eError = RGXHWPerfHostInitOnDemandResources();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+			        " resources failed", __func__));
+ OSLockRelease(gpsRgxDevInfo->hLockHWPerfHostStream);
+ goto e0;
+ }
+ }
+ OSLockRelease(gpsRgxDevInfo->hLockHWPerfHostStream);
+
+ /* Open the RGX TL stream for reading in this session */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+ &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ /* Open the host TL stream for reading in this session */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+ &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ return PVRSRV_OK;
+
+e1:
+ RGXHWPerfHostDeInit();
+e0:
+ RGXHWPerfDeinit();
+
+ return eError;
+}
+
+
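+/* Convenience wrapper: establish a session object (lazy connect) and
+ * immediately open the underlying TL streams for it.
+ */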
+PVRSRV_ERROR RGXHWPerfConnect(
+ IMG_HANDLE* phDevData)
+{
+ PVRSRV_ERROR eError;
+
+ eError = RGXHWPerfLazyConnect(phDevData);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0);
+
+ eError = RGXHWPerfOpen(*phDevData);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfOpen", e1);
+
+ return PVRSRV_OK;
+
+e1:
+ RGXHWPerfFreeConnection(*phDevData);
+e0:
+ *phDevData = NULL;
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfControl(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Ensure we are initialised and have a valid device node */
+ if (!psDevData->psRgxDevNode)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask);
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData || ui32NumBlocks==0 || !asBlockConfigs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Ensure we are initialised and have a valid device node */
+ if (!psDevData->psRgxDevNode)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXConfigEnableHWPerfCountersKM(NULL,
+ psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs);
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData || ui32NumBlocks==0 || !aeBlockIDs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Ensure we are initialised and have a valid device node */
+ if (!psDevData->psRgxDevNode)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXCtrlHWPerfCountersKM(NULL,
+ psDevData->psRgxDevNode, IMG_FALSE, ui32NumBlocks, aeBlockIDs);
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfAcquireData(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_PBYTE* ppBuf,
+ IMG_UINT32* pui32BufLen)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+ IMG_PBYTE pTlBuf = NULL;
+ IMG_UINT32 ui32TlBufLen = 0;
+ IMG_PBYTE pDataDest;
+ IMG_UINT32 ui32TlPackets = 0;
+ IMG_PBYTE pBufferEnd;
+ PVRSRVTL_PPACKETHDR psHDRptr;
+ PVRSRVTL_PACKETTYPE ui16TlType;
+
+ /* Reset the output arguments in case we discover an error */
+ *ppBuf = NULL;
+ *pui32BufLen = 0;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData || eStreamId >= RGX_HWPERF_STREAM_ID_LAST)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire some data to read from the HWPerf TL stream */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ psDevData->hSD[eStreamId],
+ &pTlBuf,
+ &ui32TlBufLen);
+ PVR_LOGR_IF_ERROR(eError, "TLClientAcquireData");
+
+ /* TL indicates no data exists so return OK and zero. */
+ if ((pTlBuf == NULL) || (ui32TlBufLen == 0))
+ {
+ return PVRSRV_OK;
+ }
+
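+	/* The TL buffer holds framed transport packets. Below we make sure the
+	 * per-session client buffer is at least as large as the acquired data,
+	 * then copy only the payload of each DATA packet into it so the caller
+	 * receives a contiguous run of HWPerf packets with the TL framing
+	 * stripped.
+	 */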
+ /* Is the client buffer allocated and too small? */
+ if (psDevData->pHwpBuf[eStreamId] && (psDevData->ui32HwpBufLen[eStreamId] < ui32TlBufLen))
+ {
+ OSFreeMem(psDevData->pHwpBuf[eStreamId]);
+ }
+
+ /* Do we need to allocate a new client buffer? */
+ if (!psDevData->pHwpBuf[eStreamId])
+ {
+ psDevData->pHwpBuf[eStreamId] = OSAllocMem(ui32TlBufLen);
+ if (psDevData->pHwpBuf[eStreamId] == NULL)
+ {
+ (void) TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psDevData->ui32HwpBufLen[eStreamId] = ui32TlBufLen;
+ }
+
+ /* Process each TL packet in the data buffer we have acquired */
+ pBufferEnd = pTlBuf+ui32TlBufLen;
+ pDataDest = psDevData->pHwpBuf[eStreamId];
+ psHDRptr = GET_PACKET_HDR(pTlBuf);
+ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+ {
+ ui16TlType = GET_PACKET_TYPE(psHDRptr);
+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+ {
+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+ if (0 == ui16DataLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireData: ZERO Data in TL data packet: %p", psHDRptr));
+ }
+ else
+ {
+ /* For valid data copy it into the client buffer and move
+ * the write position on */
+ OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+ pDataDest += ui16DataLen;
+ }
+ }
+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireData: Indication that the transport buffer was full"));
+ }
+ else
+ {
+ /* else Ignore padding packet type and others */
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireData: Ignoring TL packet, type %d", ui16TlType ));
+ }
+
+ /* Update loop variable to the next packet and increment counts */
+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+ ui32TlPackets++;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireData: TL Packets processed %03d", ui32TlPackets));
+
+ /* Update output arguments with client buffer details and true length */
+ *ppBuf = psDevData->pHwpBuf[eStreamId];
+ *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfReleaseData(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psDevData || eStreamId >= RGX_HWPERF_STREAM_ID_LAST)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Free the client buffer if allocated and reset length */
+ if (psDevData->pHwpBuf[eStreamId])
+ {
+ OSFreeMem(psDevData->pHwpBuf[eStreamId]);
+ }
+ psDevData->ui32HwpBufLen[eStreamId] = 0;
+
+	/* Inform the TL that we are done with reading the data. This could be done
+	 * in the acquire call, but it is kept here so acquire/release stay
+	 * symmetrical. */
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfGetFilter(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT64 *ui64Filter)
+{
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo =
+ hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL;
+
+ /* Valid input argument values supplied by the caller */
+ if (!psRgxDevInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+	/* No need to take hHWPerfLock here since we are only reading
+	 * always-present integers to return to debugfs, which is an
+	 * atomic operation.
+	 */
+ switch (eStreamId) {
+ case RGX_HWPERF_STREAM_ID0_FW:
+ *ui64Filter = psRgxDevInfo->ui64HWPerfFilter;
+ break;
+ case RGX_HWPERF_STREAM_ID1_HOST:
+ *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFreeConnection(
+ IMG_HANDLE hDevData)
+{
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*) hDevData;
+
+ /* Check session handle is not zero */
+ if (!psDevData)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Free the session memory */
+ psDevData->psRgxDevNode = NULL;
+ psDevData->psRgxDevInfo = NULL;
+ OSFreeMem(psDevData);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfClose(
+ IMG_HANDLE hDevData)
+{
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*) hDevData;
+ IMG_UINT uiStreamId;
+ PVRSRV_ERROR eError;
+
+ /* Check session handle is not zero */
+ if (!psDevData)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ for (uiStreamId = 0; uiStreamId < RGX_HWPERF_STREAM_ID_LAST; uiStreamId++)
+ {
+		/* If the client buffer still exists, the caller did not call
+		 * ReleaseData before disconnecting, so clean it up here. */
+ if (psDevData->pHwpBuf[uiStreamId])
+ {
+ /* RGXHWPerfReleaseData call will null out the buffer fields
+ * and length */
+ eError = RGXHWPerfReleaseData(hDevData, uiStreamId);
+ PVR_LOG_ERROR(eError, "RGXHWPerfReleaseData");
+ }
+
+ /* Close the TL stream, ignore the error if it occurs as we
+ * are disconnecting */
+ if (psDevData->hSD[uiStreamId])
+ {
+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psDevData->hSD[uiStreamId]);
+ PVR_LOG_ERROR(eError, "TLClientCloseStream");
+ psDevData->hSD[uiStreamId] = NULL;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(
+ IMG_HANDLE hDevData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = RGXHWPerfClose(hDevData);
+ PVR_LOG_ERROR(eError, "RGXHWPerfClose");
+
+ eError = RGXHWPerfFreeConnection(hDevData);
+ PVR_LOG_ERROR(eError, "RGXHWPerfFreeConnection");
+
+ return eError;
+}
+
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
+{
+ static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+ "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST"
+ };
+
+ /* cast in case of negative value */
+ if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+ {
+ return "<UNKNOWN>";
+ }
+
+ return aszKickType[eKickType];
+}
+
+/******************************************************************************
+ End of file (rgxhwperf.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX HWPerf functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "connection_server.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf_km.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(void);
+void RGXHWPerfDeinit(void);
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32ArrayLen,
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_BOOL bEnable,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT16 * psBlockIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 * pui32CustomCounterIDs);
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfHostInit(IMG_UINT32 ui32BufSizeKB);
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(void);
+void RGXHWPerfHostDeInit(void);
+
+void RGXHWPerfHostSetEventFilter(IMG_UINT32 ui32Filter);
+
+void RGXHWPerfHostPostCtrlEvent(RGX_HWPERF_HOST_CTRL_TYPE eEvType,
+ IMG_UINT32 ui32Pid);
+
+void RGXHWPerfHostPostEnqEvent(RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef);
+
+void RGXHWPerfHostPostAllocEvent(RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostFreeEvent(RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT32 ui32FWAddr);
+
+void RGXHWPerfHostPostUfoEvent(RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+ IMG_UINT uiNoOfUFOs);
+
+void RGXHWPerfHostPostClkSyncEvent(void);
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_EVENT_TYPE eEvent);
+
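+/* Helper used by the ENQ macro below: checks the per-device HWPerf host
+ * event filter for the given event bit, using the device info reachable
+ * from the supplied context.
+ */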
+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
+ (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
+ & RGX_HWPERF_EVENT_MASK_VALUE(EV))
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param C context
+ * @param P process id (PID)
+ * @param X firmware context
+ * @param E ExtJobRef
+ * @param I IntJobRef
+ * @param K kick type
+ */
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
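+/* Guest (virtualised) driver builds: the host event macros below compile to
+ * no-ops that simply consume their arguments, so no HWPerf host events are
+ * posted from a guest driver.
+ */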
+#define RGX_HWPERF_HOST_CTRL(E, P) \
+ do { \
+ PVR_UNREFERENCED_PARAMETER(P); \
+ } while (0)
+
+#define RGX_HWPERF_HOST_ENQ(C, P, X, E, I, K) \
+ do { \
+ PVR_UNREFERENCED_PARAMETER(X); \
+ PVR_UNREFERENCED_PARAMETER(E); \
+ PVR_UNREFERENCED_PARAMETER(I); \
+ } while (0)
+
+#define RGX_HWPERF_HOST_UFO(T, D, N) \
+ do { \
+ PVR_UNREFERENCED_PARAMETER(T); \
+ PVR_UNREFERENCED_PARAMETER(D); \
+ PVR_UNREFERENCED_PARAMETER(N); \
+ } while (0)
+
+#define RGX_HWPERF_HOST_ALLOC(T, F, N, Z) \
+ do { \
+ PVR_UNREFERENCED_PARAMETER(RGX_HWPERF_HOST_RESOURCE_TYPE_##T); \
+ PVR_UNREFERENCED_PARAMETER(F); \
+ PVR_UNREFERENCED_PARAMETER(N); \
+ PVR_UNREFERENCED_PARAMETER(Z); \
+ } while (0)
+
+#define RGX_HWPERF_HOST_FREE(T, F) \
+ do { \
+ PVR_UNREFERENCED_PARAMETER(RGX_HWPERF_HOST_RESOURCE_TYPE_##T); \
+ PVR_UNREFERENCED_PARAMETER(F); \
+ } while (0)
+
+#define RGX_HWPERF_HOST_CLK_SYNC()
+#else
+/**
+ * @param E event type
+ * @param P PID
+ */
+#define RGX_HWPERF_HOST_CTRL(E, P) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_CTRL)) \
+ { \
+ RGXHWPerfHostPostCtrlEvent(RGX_HWPERF_CTRL_TYPE_##E, (P)); \
+ } \
+ } while (0)
+
+#define RGX_HWPERF_HOST_ENQ(C, P, X, E, I, K) \
+ do { \
+ if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \
+ { \
+ RGXHWPerfHostPostEnqEvent((K), (P), (X), (E), (I)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param T Host UFO event type
+ * @param D UFO data array
+ * @param N number of syncs in data array
+ */
+#define RGX_HWPERF_HOST_UFO(T, D, N) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_UFO)) \
+ { \
+ RGXHWPerfHostPostUfoEvent((T), (D), (N)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param T resource type (token appended to RGX_HWPERF_HOST_RESOURCE_TYPE_)
+ * @param F sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_ALLOC(T, F, N, Z) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_ALLOC)) \
+ { \
+ RGXHWPerfHostPostAllocEvent(RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (F), (N), (Z)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param T resource type (token appended to RGX_HWPERF_HOST_RESOURCE_TYPE_)
+ * @param F sync firmware address
+ */
+#define RGX_HWPERF_HOST_FREE(T, F) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_FREE)) \
+ { \
+ RGXHWPerfHostPostFreeEvent(RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (F)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ */
+#define RGX_HWPERF_HOST_CLK_SYNC() \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(RGX_HWPERF_HOST_CLK_SYNC)) \
+ { \
+ RGXHWPerfHostPostClkSyncEvent(); \
+ } \
+ } while (0)
+#endif
+
+/******************************************************************************
+ * RGX HW Performance To FTrace Profiling API(s)
+ *****************************************************************************/
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfFTraceGPUDeInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32CtxId, IMG_UINT32 ui32JobId,
+ RGX_HWPERF_KICK_TYPE eKickType);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(IMG_BOOL bNewValue);
+
+void RGXHWPerfFTraceGPUThread(void *pvData);
+
+#endif
+
+/******************************************************************************
+ * RGX HW utils functions
+ *****************************************************************************/
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType);
+
+#endif /* RGXHWPERF_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+
+#include "rgxinit.h"
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+#include "rgxinit_vz.h"
+#endif
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "debugmisc_server.h"
+
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "rgxmipsmmuinit.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+
+#include "rgx_bvnc_table_km.h"
+#include "rgx_bvnc_defs_km.h"
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "rgx_fwif_alignchecks.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+#define RGX_MMU_LOG2_PAGE_SIZE_4KB (12)
+#define RGX_MMU_LOG2_PAGE_SIZE_16KB (14)
+#define RGX_MMU_LOG2_PAGE_SIZE_64KB (16)
+#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
+#define RGX_MMU_LOG2_PAGE_SIZE_1MB (20)
+#define RGX_MMU_LOG2_PAGE_SIZE_2MB (21)
+
+#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+#define MAX_BVNC_LEN (12)
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1)
+
+/* List of BVNC strings given as module param & count*/
+IMG_PCHAR gazRGXBVNCList[PVRSRV_MAX_DEVICES];
+IMG_UINT32 gui32RGXLoadTimeDevCount;
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+
+/* bits used by the LISR to provide a trace of its last execution */
+#define RGX_LISR_DEVICE_NOT_POWERED (1 << 0)
+#define RGX_LISR_FWIF_POW_OFF (1 << 1)
+#define RGX_LISR_EVENT_EN (1 << 2)
+#define RGX_LISR_COUNTS_EQUAL (1 << 3)
+#define RGX_LISR_PROCESSED (1 << 4)
+
+typedef struct _LISR_EXECUTION_INFO_
+{
+ /* bit mask showing execution flow of last LISR invocation */
+ IMG_UINT32 ui32State;
+ /* snapshot from the last LISR invocation, regardless of
+ * whether an interrupt was handled
+ */
+ IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
+ /* time of the last LISR invocation */
+ IMG_UINT64 ui64Clockns;
+} LISR_EXECUTION_INFO;
+
+/* information about the last execution of the LISR */
+static LISR_EXECUTION_INFO g_sLISRExecutionInfo;
+
+#endif
+
+#if !defined(NO_HARDWARE)
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ OSScheduleMISR(psDevInfo->pvMISRData);
+}
+
+/*
+ Guest Driver RGX LISR Handler
+*/
+static IMG_BOOL RGX_LISRHandler (void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if (psDevInfo->bRGXPowered == IMG_FALSE)
+ {
+ return IMG_FALSE;
+ }
+
+ OSScheduleMISR(psDevInfo->pvMISRData);
+ return IMG_TRUE;
+}
+#else
+
+/*************************************************************************/ /*!
+@Function SampleIRQCount
+@Description Utility function taking snapshots of RGX FW interrupt count.
+@Input paui32Input A pointer to RGX FW IRQ count array.
+ Size of the array should be equal to RGX FW thread
+ count.
+@Output        paui32Output A pointer to the array receiving the sampled RGX
+                            FW IRQ counts.
+@Return        IMG_BOOL Returns IMG_TRUE if the RGX FW IRQ count differs from
+                        the previously sampled count for any RGX FW thread.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(volatile IMG_UINT32 *paui32Input,
+ volatile IMG_UINT32 *paui32Output)
+{
+ IMG_UINT32 ui32TID;
+ IMG_BOOL bReturnVal = IMG_FALSE;
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ if (paui32Output[ui32TID] != paui32Input[ui32TID])
+ {
+			/* We are handling any unhandled interrupts here, so align the
+			 * host count with the FW count.
+			 */
+
+ /* Sample the current count from the FW _after_ we've cleared the interrupt. */
+ paui32Output[ui32TID] = paui32Input[ui32TID];
+ bReturnVal = IMG_TRUE;
+ }
+ }
+
+ return bReturnVal;
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_BOOL bScheduleMISR = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ IMG_UINT32 ui32TID;
+#endif
+
+ RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ PVR_DPF((PVR_DBG_ERROR, "Last RGX_LISRHandler State: 0x%08X Clock: %llu",
+ g_sLISRExecutionInfo.ui32State,
+ g_sLISRExecutionInfo.ui64Clockns));
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGX FW thread %u: InterruptCountSnapshot: 0x%X",
+				ui32TID, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID]));
+ }
+#else
+ PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+
+ if(psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)",
+ (unsigned int) psRGXFWIfTraceBuf->ePowState));
+ }
+
+ bScheduleMISR = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+ psDevInfo->aui32SampleIRQCount);
+
+ if (bScheduleMISR)
+ {
+ OSScheduleMISR(psDevInfo->pvMISRData);
+
+ if(psDevInfo->pvAPMISRData != NULL)
+ {
+ OSScheduleMISR(psDevInfo->pvAPMISRData);
+ }
+ }
+}
+
+/*
+ RGX LISR Handler
+*/
+static IMG_BOOL RGX_LISRHandler (void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_BOOL bInterruptProcessed = IMG_FALSE;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 ui32IRQStatus, ui32IRQStatusReg, ui32IRQStatusEventMsk, ui32IRQClearReg, ui32IRQClearMask;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	IMG_UINT32 ui32TID;
+#endif
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ ui32IRQStatusReg = RGX_CR_MIPS_WRAPPER_IRQ_STATUS;
+ ui32IRQStatusEventMsk = RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+ ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+ ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	}
+	else
+	{
+ ui32IRQStatusReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQStatusEventMsk = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+ ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+ }
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID ] =
+ psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID];
+ }
+ g_sLISRExecutionInfo.ui32State = 0;
+ g_sLISRExecutionInfo.ui64Clockns = OSClockns64();
+#endif
+
+ if (psDevInfo->bRGXPowered == IMG_FALSE)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED;
+#endif
+ if (psRGXFWIfTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF;
+#endif
+ return bInterruptProcessed;
+ }
+ }
+
+ ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+ if (ui32IRQStatus & ui32IRQStatusEventMsk)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN;
+#endif
+
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+
+ bInterruptProcessed = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+ psDevInfo->aui32SampleIRQCount);
+
+ if (!bInterruptProcessed)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL;
+#endif
+ return bInterruptProcessed;
+ }
+
+ bInterruptProcessed = IMG_TRUE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED;
+#endif
+
+ OSScheduleMISR(psDevInfo->pvMISRData);
+
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ OSScheduleMISR(psDevInfo->pvAPMISRData);
+ }
+ }
+
+ return bInterruptProcessed;
+}
+
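+/*
+ * Active Power Management MISR callback (installed later via OSInstallMISR
+ * in PVRSRVRGXInitDevPart2KM): if the FW reports itself IDLE, request an
+ * active power transition so that the GPU can be powered down.
+ */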
+static void RGXCheckFWActivePowerState(void *psDevice)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevice;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+ {
+ /* The FW is IDLE and therefore could be shut down */
+ eError = RGXActivePowerRequest(psDeviceNode);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ }
+ }
+
+}
+
+/* Short aliases to keep the code below concise */
+#define GPU_ACTIVE_LOW RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW
+#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE_HIGH RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH
+#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS 64
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hGpuUtilUser,
+ RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+ IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+ IMG_UINT64 ui64TimeNow;
+ IMG_UINT64 ui64LastPeriod;
+ IMG_UINT64 ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+ IMG_UINT32 i = 0;
+
+
+ /***** (1) Initialise return stats *****/
+
+ psReturnStats->bValid = IMG_FALSE;
+ psReturnStats->ui64GpuStatActiveLow = 0;
+ psReturnStats->ui64GpuStatIdle = 0;
+ psReturnStats->ui64GpuStatActiveHigh = 0;
+ psReturnStats->ui64GpuStatBlocked = 0;
+ psReturnStats->ui64GpuStatCumulative = 0;
+
+ if (hGpuUtilUser == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psAggregateStats = hGpuUtilUser;
+
+
+ /***** (2) Get latest data from shared area *****/
+
+ OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+ /* Read the timer before reading the latest stats from the shared
+ * area, discard it later in case of state updates after this point.
+ */
+ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+ OSMemoryBarrier();
+
+ /* Keep reading the counters until the values stabilise as the FW
+ * might be updating them at the same time.
+ */
+ while(((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+ (aui64TmpCounters[ui64LastState] !=
+ psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+ (i < MAX_ITERATIONS))
+ {
+ ui64LastWord = psUtilFWCb->ui64LastWord;
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+ aui64TmpCounters[GPU_ACTIVE_LOW] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_LOW];
+ aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+ aui64TmpCounters[GPU_ACTIVE_HIGH] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_HIGH];
+ aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+ i++;
+ }
+
+ OSLockRelease(psDevInfo->hGPUUtilLock);
+
+ if (i == MAX_ITERATIONS)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data within a short time."));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+
+ /***** (3) Compute return stats and update aggregate stats *****/
+
+ /* Update temp counters to account for the time since the last update to the shared ones */
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+ aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+ /* Get statistics for a user since its last request */
+ psReturnStats->ui64GpuStatActiveLow = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_LOW],
+ psAggregateStats->ui64GpuStatActiveLow);
+ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+ psAggregateStats->ui64GpuStatIdle);
+ psReturnStats->ui64GpuStatActiveHigh = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_HIGH],
+ psAggregateStats->ui64GpuStatActiveHigh);
+ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+ psAggregateStats->ui64GpuStatBlocked);
+ psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatActiveLow + psReturnStats->ui64GpuStatIdle +
+ psReturnStats->ui64GpuStatActiveHigh + psReturnStats->ui64GpuStatBlocked;
+
+ /* Update aggregate stats for the current user */
+ psAggregateStats->ui64GpuStatActiveLow += psReturnStats->ui64GpuStatActiveLow;
+ psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle;
+ psAggregateStats->ui64GpuStatActiveHigh += psReturnStats->ui64GpuStatActiveHigh;
+ psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
+
+
+ /***** (4) Convert return stats to microseconds *****/
+
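+	/* The stats above are accumulated in nanoseconds, so dividing by 1000
+	 * yields microseconds (e.g. 1,500,000 ns of active-high time is returned
+	 * as 1,500 us). The value OSDivide64 writes back through 'i' (assumed to
+	 * be the remainder) is not used afterwards.
+	 */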
+ psReturnStats->ui64GpuStatActiveLow = OSDivide64(psReturnStats->ui64GpuStatActiveLow, 1000, &i);
+ psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &i);
+ psReturnStats->ui64GpuStatActiveHigh = OSDivide64(psReturnStats->ui64GpuStatActiveHigh, 1000, &i);
+ psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &i);
+ psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &i);
+
+ /* Check that the return stats make sense */
+ if(psReturnStats->ui64GpuStatCumulative == 0)
+ {
+		/* We can only get here if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+		 * calls above returned 0. This can happen if the GPU frequency
+		 * value is not well calibrated and the FW updates the GPU state
+		 * while the Host is reading it.
+		 * If this happens frequently, the timers or the aggregate stats
+		 * might not be accurate.
+		 */
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ psReturnStats->bValid = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+#endif /* defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser)
+{
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+ psAggregateStats = OSAllocMem(sizeof(RGXFWIF_GPU_UTIL_STATS));
+ if(psAggregateStats == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psAggregateStats->ui64GpuStatActiveLow = 0;
+ psAggregateStats->ui64GpuStatIdle = 0;
+ psAggregateStats->ui64GpuStatActiveHigh = 0;
+ psAggregateStats->ui64GpuStatBlocked = 0;
+
+ /* Not used */
+ psAggregateStats->bValid = IMG_FALSE;
+ psAggregateStats->ui64GpuStatCumulative = 0;
+
+ *phGpuUtilUser = psAggregateStats;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser)
+{
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+ if(hGpuUtilUser == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psAggregateStats = hGpuUtilUser;
+ OSFreeMem(psAggregateStats);
+
+ return PVRSRV_OK;
+}
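+
+/*
+ * Sketch of typical (hypothetical) usage of the GPU utilisation API above,
+ * for builds where pfnGetGpuUtilStats has been set up (non-guest, real
+ * hardware):
+ *
+ *     IMG_HANDLE hUser;
+ *     RGXFWIF_GPU_UTIL_STATS sStats;
+ *
+ *     RGXRegisterGpuUtilStats(&hUser);
+ *     ...
+ *     psDevInfo->pfnGetGpuUtilStats(psDeviceNode, hUser, &sStats);
+ *     if (sStats.bValid) { ... report the ui64GpuStat* fields (in us) ... }
+ *     ...
+ *     RGXUnregisterGpuUtilStats(hUser);
+ *
+ * Each registered user keeps its own aggregate stats, so the values returned
+ * cover the period since that user's previous query.
+ */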
+
+/*
+ RGX MISR Handler
+*/
+static void RGX_MISRHandler (void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+
+ /* Give the HWPerf service a chance to transfer some data from the FW
+ * buffer to the host driver transport layer buffer.
+ */
+ RGXHWPerfDataStoreCB(psDeviceNode);
+
+	/* Inform other Services devices that we have finished an operation */
+ PVRSRVCheckStatus(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	/*
+	 * The Firmware CCB only exists for the primary FW thread. The only case
+	 * in which a non-primary FW thread needs to communicate with the host
+	 * driver is PDVFS running on a non-primary FW thread, and that case is
+	 * handled directly by the call below.
+	 */
+ RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+ /* Process the Firmware CCB for pending commands */
+ RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Calibrate the GPU frequency and recorrelate Host and FW timers (done every few seconds) */
+ RGXGPUFreqCalibrateCorrelatePeriodic(psDeviceNode);
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Process Workload Estimation Specific commands from the FW */
+ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+}
+#endif /* !defined(NO_HARDWARE) */
+
+
+/* This function writes into the firmware image the parameters needed for the initial boot */
+static PVRSRV_ERROR RGXBootldrDataInit(PVRSRV_DEVICE_NODE *psDeviceNode,
+ void *pvFWImage)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+ IMG_UINT64 *pui64BootConfig;
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_BOOL bValid;
+
+ /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
+ pui64BootConfig = (IMG_UINT64 *) pvFWImage;
+
+ /* ... jump to the boot/NMI data page... */
+ pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+
+ /* ... and then jump to the bootloader data offset within the page */
+ pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+
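+	/* The bootloader configuration is an array of 64-bit words (hence the
+	 * RGXMIPSFW_GET_OFFSET_IN_QWORDS conversions above). The entries written
+	 * below are: the Rogue register bank physical address, the MIPS page
+	 * table physical address, the MIPS stack pointer physical address, a
+	 * reserved word, and the FW init structure virtual address.
+	 */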
+
+ /* Rogue Registers physical address */
+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ 1, &sPhyAddr, &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+ pui64BootConfig[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+ /* MIPS Page Table physical address. There are 16 pages for a firmware heap of 32 MB */
+ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPhyAddr);
+ pui64BootConfig[RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+ /* MIPS Stack Pointer Physical Address */
+ eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+ &sPhyAddr,
+ RGXMIPSFW_STACK_OFFSET,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXBootldrDataInit: RGXGetPhyAddr failed (%u)",
+ eError));
+ return eError;
+ }
+ pui64BootConfig[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+ /* Reserved for future use */
+ pui64BootConfig[RGXMIPSFW_RESERVED_FUTURE_OFFSET] = 0;
+
+ /* FW Init Data Structure Virtual Address */
+ pui64BootConfig[RGXMIPSFW_FWINIT_VIRTADDR_OFFSET] = psDevInfo->psRGXFWIfInitMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr;
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP) && !defined(PVRSRV_GPUVIRT_GUESTDRV)
+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+ IMG_DEV_PHYADDR sTmpAddr;
+ IMG_UINT32 ui32BootConfOffset, ui32ParamOffset;
+ PVRSRV_ERROR eError;
+
+ ui32BootConfOffset = (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+ ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET;
+
+ /* The physical addresses used by a pdump player will be different
+ * than the ones we have put in the MIPS bootloader configuration data.
+ * We have to tell the pdump player to replace the original values with the real ones.
+ */
+ PDUMPCOMMENT("Pass new boot parameters to the FW");
+
+ /* Rogue Registers physical address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME,
+ 0x0,
+ psFWDataPMR,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError));
+ return eError;
+ }
+
+ /* Page Table physical Address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr);
+
+ eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psFWDataPMR,
+ 0,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS,
+ MMU_LEVEL_1,
+ sTmpAddr.uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError));
+ return eError;
+ }
+
+ /* Stack physical address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ eError = PDumpMemLabelToMem64(psFWDataPMR,
+ psFWDataPMR,
+ RGXMIPSFW_STACK_OFFSET,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+#endif /* defined(PDUMP) && !defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+
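+/*
+ * GPU virtualisation validation support: aui32Elements carries a min/max
+ * OSid pair for every (OS, region) combination; the pairs are unpacked below
+ * and used to populate the LMA sub-arenas.
+ */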
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32NumElements,
+ IMG_UINT32 aui32Elements[],
+ IMG_BOOL bEnableTrustedDeviceAceConfig)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OS, ui32Region, ui32Counter=0;
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+
+ PVR_UNREFERENCED_PARAMETER(ui32NumElements);
+
+ for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++)
+ {
+ for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+ {
+ aui32OSidMin[ui32OS][ui32Region] = aui32Elements[ui32Counter++];
+ aui32OSidMax[ui32OS][ui32Region] = aui32Elements[ui32Counter++];
+
+ PVR_DPF((PVR_DBG_MESSAGE,"OS=%u, Region=%u, Min=%u, Max=%u", ui32OS, ui32Region, aui32OSidMin[ui32OS][ui32Region], aui32OSidMax[ui32OS][ui32Region]));
+ }
+ }
+
+ PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax);
+
+ #if defined(EMULATOR)
+ if ((bEnableTrustedDeviceAceConfig) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK))
+ {
+ SetTrustedDeviceAceEnabled();
+ }
+ #else
+ {
+ PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+ }
+ #endif
+ }
+#else
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32NumElements);
+ PVR_UNREFERENCED_PARAMETER(aui32Elements);
+ PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+}
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+
+ /* Save information used on power transitions for later
+ * (when RGXStart and RGXStop are executed)
+ */
+ psDevInfo->sPowerParams.psDevInfo = psDevInfo;
+ psDevInfo->sPowerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+ psDevInfo->sPowerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+ &sKernelMMUCtxPCAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+ return eError;
+ }
+
+ psDevInfo->sPowerParams.sPCAddr = sKernelMMUCtxPCAddr;
+	}
+	else
+	{
+ PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR);
+ PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_BOOL bValid;
+
+ /* The physical address of the GPU registers needs to be translated
+ * in case we are in a LMA scenario
+ */
+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ 1,
+ &sPhyAddr,
+ &(psDevConfig->sRegsCpuPBase));
+
+ psDevInfo->sPowerParams.sGPURegAddr = sPhyAddr;
+
+ eError = RGXGetPhyAddr(psFWCodePMR,
+ &sPhyAddr,
+ RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address"));
+ return eError;
+ }
+
+ psDevInfo->sPowerParams.sBootRemapAddr = sPhyAddr;
+
+ eError = RGXGetPhyAddr(psFWDataPMR,
+ &sPhyAddr,
+ RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address"));
+ return eError;
+ }
+
+ psDevInfo->sPowerParams.sDataRemapAddr = sPhyAddr;
+
+ eError = RGXGetPhyAddr(psFWCodePMR,
+ &sPhyAddr,
+ RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address"));
+ return eError;
+ }
+
+ psDevInfo->sPowerParams.sCodeRemapAddr = sPhyAddr;
+
+ psDevInfo->sPowerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->sTrampoline.sPhysAddr.uiAddr;
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ /* Send information used on power transitions to the trusted device as
+ * in this setup the driver cannot start/stop the GPU and perform resets
+ */
+ if (psDevConfig->pfnTDSetPowerParams)
+ {
+ PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ sTDPowerParams.sPCAddr = psDevInfo->sPowerParams.sPCAddr;
+		}
+		else
+		{
+ sTDPowerParams.sGPURegAddr = psDevInfo->sPowerParams.sGPURegAddr;
+ sTDPowerParams.sBootRemapAddr = psDevInfo->sPowerParams.sBootRemapAddr;
+ sTDPowerParams.sCodeRemapAddr = psDevInfo->sPowerParams.sCodeRemapAddr;
+ sTDPowerParams.sDataRemapAddr = psDevInfo->sPowerParams.sDataRemapAddr;
+ }
+ eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+ &sTDPowerParams);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+#endif
+
+ return eError;
+}
+
+/*
+ * PVRSRVRGXInitDevPart2KM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_INIT_COMMAND *psDbgScript,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSizeKB,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ RGX_ACTIVEPM_CONF eActivePMConf,
+ PMR *psFWCodePMR,
+ PMR *psFWDataPMR,
+ PMR *psFWCorePMR,
+ PMR *psHWPerfPMR)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+#if defined(PDUMP)
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ RGXPDumpBootldrData(psDeviceNode, psDevInfo);
+ }
+#endif
+
+#if defined(TIMING) || defined(DEBUG)
+ OSUserModeAccessToPerfCountersEn();
+#endif /* defined(TIMING) || defined(DEBUG) */
+#endif /* !defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+	/* The PMRs are passed down here only so that their handles can be destroyed; they are not otherwise used */
+ PVR_UNREFERENCED_PARAMETER(psFWCodePMR);
+ PVR_UNREFERENCED_PARAMETER(psFWDataPMR);
+ PVR_UNREFERENCED_PARAMETER(psFWCorePMR);
+ PVR_UNREFERENCED_PARAMETER(psHWPerfPMR);
+
+ PDUMPCOMMENT("RGX Initialisation Part 2");
+
+ psDevInfo->ui32RegSize = psDevConfig->ui32RegsSize;
+ psDevInfo->sRegsPhysBase = psDevConfig->sRegsCpuPBase;
+
+ /* Initialise Device Flags */
+ psDevInfo->ui32DeviceFlags = 0;
+ RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Allocate DVFS Table (needs to be allocated before SUPPORT_GPUTRACE_EVENTS
+ * is initialised because there is a dependency between them) */
+ psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+ if (psDevInfo->psGpuDVFSTable == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Reset DVFS Table */
+ psDevInfo->psGpuDVFSTable->ui32CurrentDVFSId = 0;
+ psDevInfo->psGpuDVFSTable->aui32DVFSClock[0] = 0;
+#endif /* !defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+ /* Initialise HWPerfHost buffer. */
+ if (RGXHWPerfHostInit(ui32HWPerfHostBufSizeKB) == PVRSRV_OK)
+ {
+ /* If HWPerf enabled allocate all resources for the host side buffer. */
+ if (ui32DeviceFlags & RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN)
+ {
+ if (RGXHWPerfHostInitOnDemandResources() == PVRSRV_OK)
+ {
+ RGXHWPerfHostSetEventFilter(ui32HWPerfHostFilter);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+ " initialisation failed."));
+ }
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed."));
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ {
+		/* Tracing might already have been enabled via pvr/gpu_tracing_on,
+		 * but when SUPPORT_KERNEL_SRVINIT == 1 the HWPerf buffer has only
+		 * just been allocated, so the initialisation was not complete.
+		 * RGXHWPerfFTraceGPUEventsEnabledSet() performs the full
+		 * initialisation in that case. */
+ IMG_BOOL bInit = IMG_FALSE;
+
+ /* This can happen if SUPPORT_KERNEL_SRVINIT == 1. */
+ if (PVRGpuTracePreEnabled())
+ {
+ bInit = IMG_TRUE;
+ }
+ else
+ {
+ bInit = ui32DeviceFlags & RGXKMIF_DEVICE_STATE_FTRACE_EN ?
+ IMG_TRUE : IMG_FALSE;
+ }
+ RGXHWPerfFTraceGPUEventsEnabledSet(bInit);
+ }
+#endif
+
+ /* Initialise lists of ZSBuffers */
+ eError = OSLockCreate(&psDevInfo->hLockZSBuffer,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sZSBufferHead);
+ psDevInfo->ui32ZSBufferCurrID = 1;
+
+ /* Initialise lists of growable Freelists */
+ eError = OSLockCreate(&psDevInfo->hLockFreeList,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sFreeListHead);
+ psDevInfo->ui32FreelistCurrID = 1;
+
+#if 1 /* defined(SUPPORT_RAY_TRACING) */
+ eError = OSLockCreate(&psDevInfo->hLockRPMFreeList,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sRPMFreeListHead);
+ psDevInfo->ui32RPMFreelistCurrID = 1;
+ eError = OSLockCreate(&psDevInfo->hLockRPMContext,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ eError = OSLockCreate(&psDevInfo->hNMILock, LOCK_TYPE_DISPATCH);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Setup GPU utilisation stats update callback */
+#if !defined(NO_HARDWARE)
+ psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+ eError = OSLockCreate(&psDevInfo->hGPUUtilLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+ psDevInfo->eActivePMConf = eActivePMConf;
+
+ /* set-up the Active Power Mgmt callback */
+#if !defined(NO_HARDWARE)
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+ IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+ (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ /* Disable APM for now */
+ bEnableAPM = IMG_FALSE;
+#endif
+ if (bEnableAPM)
+ {
+ eError = OSInstallMISR(&psDevInfo->pvAPMISRData, RGXCheckFWActivePowerState, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* Prevent the device being woken up before there is something to do. */
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+ }
+ }
+#endif /* !defined(NO_HARDWARE) */
+#endif /* !defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+ RGXQueryAPMState,
+ RGXSetAPMState,
+ psDeviceNode,
+ NULL);
+
+ RGXGPUFreqCalibrationInitAppHintCallbacks(psDeviceNode);
+
+	/*
+	 * Register the device with the power manager.
+	 *   Normal and hypervisor (host) drivers: support power management.
+	 *   Guest drivers: do not currently support power management.
+	 */
+ eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+ &RGXPrePowerState, &RGXPostPowerState,
+ psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+ &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+ &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+ &RGXDustCountChange,
+ (IMG_HANDLE)psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ eDefaultPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to register device with power manager"));
+ return eError;
+ }
+
+ eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+ if (eError != PVRSRV_OK) return eError;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+	/*
+	 * Guest drivers do not perform on-chip firmware loading,
+	 * initialisation or management.
+	 */
+ PVR_UNREFERENCED_PARAMETER(psDbgScript);
+ PVR_UNREFERENCED_PARAMETER(eActivePMConf);
+#else
+ /*
+ * Copy scripts
+ */
+ OSCachedMemCopy(psDevInfo->psScripts->asDbgCommands, psDbgScript,
+ RGX_MAX_DEBUG_COMMANDS * sizeof(*psDbgScript));
+
+#if defined(PDUMP)
+ /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands");
+
+ psDevInfo->sPowerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
+
+ eError = RGXStop(&psDevInfo->sPowerParams);
+ if (eError != PVRSRV_OK) return eError;
+
+ psDevInfo->sPowerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif /* defined(PDUMP) */
+#endif /* defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+#if !defined(NO_HARDWARE)
+ eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ return eError;
+ }
+
+ /* Register the interrupt handlers */
+ eError = OSInstallMISR(&psDevInfo->pvMISRData,
+ RGX_MISRHandler, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ return eError;
+ }
+
+ eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+ psDevConfig->ui32IRQ,
+ PVRSRV_MODNAME,
+ RGX_LISRHandler,
+ psDeviceNode,
+ &psDevInfo->pvLISRData);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ (void) OSUninstallMISR(psDevInfo->pvMISRData);
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer =
+ OSAddTimer((PFN_TIMER_FUNC)PDVFSRequestReactiveUpdate,
+ psDevInfo,
+ PDVFS_REACTIVE_INTERVAL_MS);
+
+ OSEnableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+#endif
+
+#if defined(PDUMP)
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK))
+ {
+ if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+ !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping");
+ }
+ else
+ {
+ if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping");
+ }
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping");
+ }
+ }
+ }
+#endif
+
+ psDevInfo->bDevInit2Done = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD sKccbCmd;
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+	eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+	                                   RGXFWIF_DM_GP,
+	                                   &sKccbCmd,
+	                                   sizeof(sKccbCmd),
+	                                   PDUMP_FLAGS_CONTINUOUS);
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* set up fw memory contexts */
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ /* Register callbacks for creation of device memory contexts */
+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+ /* Create the memory context for the firmware. */
+ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+ &psDevInfo->psKernelDevmemCtx);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemCreateContext (%u)", eError));
+ goto failed_to_create_ctx;
+ }
+
+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+ "Firmware", /* FIXME: We need to create an IDENT macro for this string.
+ Make sure the IDENT macro is not accessible to userland */
+ &psDevInfo->psFirmwareHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+ goto failed_to_find_heap;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ eError = RGXVzInitCreateFWKernelMemoryContext(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXInitCreateFWKernelMemoryContext: Failed RGXVzInitCreateFWKernelMemoryContext (%u)",
+ eError));
+ goto failed_to_find_heap;
+ }
+#endif
+
+ return eError;
+
+failed_to_find_heap:
+ /*
+ * Clear the mem context create callbacks before destroying the RGX firmware
+ * context to avoid a spurious callback.
+ */
+ psDeviceNode->pfnRegisterMemoryContext = NULL;
+ psDeviceNode->pfnUnregisterMemoryContext = NULL;
+ DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+ psDevInfo->psKernelDevmemCtx = NULL;
+failed_to_create_ctx:
+ return eError;
+}
+
+static void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ RGXVzDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+#endif
+
+ /*
+ * Clear the mem context create callbacks before destroying the RGX firmware
+ * context to avoid a spurious callback.
+ */
+ psDeviceNode->pfnRegisterMemoryContext = NULL;
+ psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+ if (psDevInfo->psKernelDevmemCtx)
+ {
+ eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+ /* FIXME - this should return void */
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+}
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(RGXFW_ALIGNCHECKS)
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32AlignChecksSize,
+ IMG_UINT32 aui32AlignChecks[])
+{
+ static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+ IMG_UINT32 i, *paui32FWAlignChecks;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: FW Alignment Check"
+ " Mem Descriptor is NULL"));
+ return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+ (void **) &paui32FWAlignChecks);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAlignmentCheckKM: Failed to acquire"
+ " kernel address for alignment checks (%u)", eError));
+ return eError;
+ }
+
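+	/* The shared buffer begins with the KM-side checks (aui32AlignChecksKM);
+	 * step over them, and one additional word of the shared layout, to reach
+	 * the section compared against the caller-supplied values: first a
+	 * count, then the individual alignment values.
+	 */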
+ paui32FWAlignChecks += IMG_ARR_NUM_ELEMS(aui32AlignChecksKM) + 1;
+ if (*paui32FWAlignChecks++ != ui32AlignChecksSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Mismatch"
+ " in number of structures to check."));
+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+ goto return_;
+ }
+
+ for (i = 0; i < ui32AlignChecksSize; i++)
+ {
+ if (aui32AlignChecks[i] != paui32FWAlignChecks[i])
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Check for"
+ " structured alignment failed."));
+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+ goto return_;
+ }
+ }
+
+return_:
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+ return eError;
+}
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) && defined(RGXFW_ALIGNCHECKS) */
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* FW compatibility checks are ignored in guest drivers */
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT64 ui64ResetValue1,
+ IMG_UINT64 ui64ResetValue2)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui64ResetValue1);
+ PVR_UNREFERENCED_PARAMETER(ui64ResetValue2);
+ return PVRSRV_OK;
+}
+
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(hDbgReqestHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32VerbLevel);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiFWCodeLen,
+ IMG_DEVMEM_SIZE_T uiFWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCorememLen,
+ PMR **ppsFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ PMR **ppsFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ PMR **ppsFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase)
+{
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+	/* Guest drivers do not perform actual on-chip FW loading/initialization */
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(uiMemAllocFlags);
+
+ eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed (%u)", eError));
+ goto failFWMemoryContextAlloc;
+ }
+
+failFWMemoryContextAlloc:
+ return eError;
+}
+#else
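+/*
+ * Allocate the FW code (or FW coremem code) region: an exportable device
+ * memory allocation in the normal case, or an import from the trusted device
+ * when SUPPORT_TRUSTED_DEVICE is defined.
+ */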
+static
+PVRSRV_ERROR RGXAllocateFWCodeRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T ui32FWCodeAllocSize,
+ IMG_UINT32 uiMemAllocFlags,
+ IMG_BOOL bFWCorememCode,
+ const IMG_PCHAR pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+ }
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+ uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PVR_UNREFERENCED_PARAMETER(bFWCorememCode);
+
+ PDUMPCOMMENT("Allocate and export FW %s memory",
+ bFWCorememCode? "coremem code" : "code");
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32FWCodeAllocSize,
+ 1 << uiLog2Align,
+ uiMemAllocFlags,
+ pszText,
+ ppsMemDescPtr);
+ return eError;
+#else
+ PDUMPCOMMENT("Import secure FW %s memory",
+ bFWCorememCode? "coremem code" : "code");
+
+ eError = DevmemImportTDFWCode(psDeviceNode,
+ ui32FWCodeAllocSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ bFWCorememCode,
+ ppsMemDescPtr);
+ return eError;
+#endif
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against the KM driver build options (KM build options only).
+
+ The following check is redundant because the next check covers the same bits:
+ if the client and server are build-compatible, and the client and firmware are
+ build-compatible, then the server and firmware are build-compatible as well.
+
+ The check is kept so that any incompatibility produces clearer error messages.
+
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+
+ ui32BuildOptionsFWKMPart = psRGXFWInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_KM;
+
+ if (ui32BuildOptions != ui32BuildOptionsFWKMPart)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask out the debug flag option, as combinations of debug vs release in UM and KM are supported */
+ ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK;
+#endif
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+ "extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ }
+
+ if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+ "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+ ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ }
+ PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32DDKVersion;
+ PVRSRV_ERROR eError;
+
+ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+ ui32DDKVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).",
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_UNPACK_MAJ(psRGXFWInit->sRGXCompChecks.ui32DDKVersion),
+ PVRVERSION_UNPACK_MIN(psRGXFWInit->sRGXCompChecks.ui32DDKVersion)));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]",
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_MAJ, PVRVERSION_MIN));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+ PVRSRV_ERROR eError=PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32DDKBuild;
+
+ ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+ ui32DDKBuild,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+ {
+ PVR_LOG(("(WARN) RGXDevInitCompatCheck: Incompatible driver DDK build version (%d) / Firmware DDK build version (%d).",
+ ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+#endif
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+ ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+ }
+#endif
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)
+ IMG_UINT32 i;
+#endif
+#if !defined(NO_HARDWARE)
+ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32B, ui32V, ui32N, ui32C;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+ PVRSRV_ERROR eError;
+ IMG_CHAR szV[8];
+
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+
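+	/* The V component is carried as a string (it is later extracted with
+	 * RGX_BVNC_PACKED_EXTR_V and printed with %s), so format it into szV
+	 * before packing B.V.N.C for the compatibility check. */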
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+ sBVNC.ui32LayoutVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (maxlen)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+ sBVNC.ui32VLenMax,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+ (IMG_UINT32)sBVNC.ui64BNC,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+ sizeof(IMG_UINT32),
+ (IMG_UINT32)(sBVNC.ui64BNC >> 32),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ for (i = 0; i < sBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+ {
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (V part)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+ i,
+ *((IMG_UINT32 *)(sBVNC.aszV + i)),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ RGX_BVNC_EQUAL(sBVNC, psRGXFWInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BNC (%d._.%d.%d) and Firmware BNC (%d._.%d.%d)",
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%d.%s.%d.%d) and Firmware BVNC (%d.%s.%d.%d)",
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+#if ((!defined(NO_HARDWARE))&&(!defined(EMULATOR)))
+#define TARGET_SILICON  /* defined for every configuration that is neither emulator nor no-hardware */
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP) || defined(TARGET_SILICON)
+ IMG_UINT64 ui64MaskBNC = RGX_BVNC_PACK_MASK_B |
+ RGX_BVNC_PACK_MASK_N |
+ RGX_BVNC_PACK_MASK_C;
+
+	IMG_BOOL bMaskV = IMG_FALSE;
+
+ PVRSRV_ERROR eError;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(TARGET_SILICON)
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+
+#if defined(PDUMP) || defined(TARGET_SILICON)
+ IMG_UINT32 ui32B, ui32V, ui32N, ui32C;
+ IMG_CHAR szV[8];
+
+ /*if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_38835_BIT_MASK)
+ {
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B;
+ bMaskV = IMG_TRUE;
+ }*/
+#if defined(COMPAT_BVNC_MASK_N)
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+ rgx_bvnc_packed(&sSWBVNC.ui64BNC, sSWBVNC.aszV, sSWBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+
+
+ if((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_38344_BIT_MASK) && (ui32C >= 10))
+ {
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+ }
+
+ if ((ui64MaskBNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) || bMaskV)
+ {
+ PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+ ((bMaskV)?("V"):("")),
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+ }
+#endif
+
+#if defined(EMULATOR)
+ PVR_LOG(("Compatibility checks for emu target: Ignoring HW BVNC checks."));
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: Layout version of compchecks struct");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+ sSWBVNC.ui32LayoutVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPCOMMENT("Compatibility check: HW V max len and FW V max len");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+ sSWBVNC.ui32VLenMax,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ if (ui64MaskBNC != 0)
+ {
+ PDUMPIF("DISABLE_HWBNC_CHECK");
+ PDUMPELSE("DISABLE_HWBNC_CHECK");
+ PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+ (IMG_UINT32)sSWBVNC.ui64BNC ,
+ (IMG_UINT32)ui64MaskBNC,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+ sizeof(IMG_UINT32),
+ (IMG_UINT32)(sSWBVNC.ui64BNC >> 32),
+ (IMG_UINT32)(ui64MaskBNC >> 32),
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPFI("DISABLE_HWBNC_CHECK");
+ }
+ if (!bMaskV)
+ {
+ IMG_UINT32 i;
+ PDUMPIF("DISABLE_HWV_CHECK");
+ PDUMPELSE("DISABLE_HWV_CHECK");
+ for (i = 0; i < sSWBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+ {
+ PDUMPCOMMENT("Compatibility check: HW V and FW V");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+ i,
+ *((IMG_UINT32 *)(sSWBVNC.aszV + i)),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+ }
+ PDUMPFI("DISABLE_HWV_CHECK");
+ }
+#endif
+
+#if defined(TARGET_SILICON)
+ if (psRGXFWInit == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ sHWBVNC = psRGXFWInit->sRGXCompChecks.sHWBVNC;
+
+ sHWBVNC.ui64BNC &= ui64MaskBNC;
+ sSWBVNC.ui64BNC &= ui64MaskBNC;
+
+ if (bMaskV)
+ {
+ sHWBVNC.aszV[0] = '\0';
+ sSWBVNC.aszV[0] = '\0';
+ }
+
+ RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_42480_BIT_MASK)
+ {
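+ /* BRN42480 workaround: a driver/FW built for BVNC 1.76.4.6 is also
+ * accepted on BVNC 1.69.4.4 hardware, so force the checks to pass
+ * for that combination. */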
+ if (!bCompatibleAll && bCompatibleVersion)
+ {
+ if ((RGX_BVNC_PACKED_EXTR_B(sSWBVNC) == 1) &&
+ !(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sSWBVNC),"76")) &&
+ (RGX_BVNC_PACKED_EXTR_N(sSWBVNC) == 4) &&
+ (RGX_BVNC_PACKED_EXTR_C(sSWBVNC) == 6))
+ {
+ if ((RGX_BVNC_PACKED_EXTR_B(sHWBVNC) == 1) &&
+ !(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sHWBVNC),"69")) &&
+ (RGX_BVNC_PACKED_EXTR_N(sHWBVNC) == 4) &&
+ (RGX_BVNC_PACKED_EXTR_C(sHWBVNC) == 4))
+ {
+ bCompatibleBNC = IMG_TRUE;
+ bCompatibleLenMax = IMG_TRUE;
+ bCompatibleV = IMG_TRUE;
+ bCompatibleAll = IMG_TRUE;
+ }
+ }
+ }
+ }
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+ __FUNCTION__,
+ sHWBVNC.ui32LayoutVersion,
+ sSWBVNC.ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of HW (%d) and FW (%d).",
+ __FUNCTION__,
+ sHWBVNC.ui32VLenMax,
+ sSWBVNC.ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BNC (%d._.%d.%d) and FW BNC (%d._.%d.%d).",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d).",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d) match. [ OK ]",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
+
+ @Description
+
+ Validate the HW firmware-processor (META or MIPS) version against the version expected by the driver
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ PVRSRV_ERROR eError;
+#endif
+
+ IMG_UINT32 ui32FWCoreIDValue = 0;
+ IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+ } else if (psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ switch(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+ case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+ case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+ case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+ PVR_ASSERT(0);
+ }
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+ } else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+ PVR_ASSERT(0);
+ }
+
+#if defined(PDUMP)
+ PDUMPIF("DISABLE_HWMETA_CHECK");
+ PDUMPELSE("DISABLE_HWMETA_CHECK");
+ PDUMPCOMMENT("Compatibility check: KM driver and HW FW Processor version");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+ ui32FWCoreIDValue,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+ PDUMPFI("DISABLE_HWMETA_CHECK");
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+ {
+ PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+ pcRGXFW_PROCESSOR,
+ ui32FWCoreIDValue,
+ pcRGXFW_PROCESSOR,
+ psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+ eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+ pcRGXFW_PROCESSOR,
+ ui32FWCoreIDValue,
+ pcRGXFW_PROCESSOR,
+ psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32RegValue;
+
+ /* Retrieve the FW information */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+ __FUNCTION__, eError));
+ return eError;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if(*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ /* No need to wait if the FW has already updated the values */
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ ui32RegValue = 0;
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+ __FUNCTION__, eError));
+ goto chk_exit;
+ }
+
+ if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+ {
+ eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+ PVR_DPF((PVR_DBG_ERROR,"%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+ __FUNCTION__, psRGXFWInit->sRGXCompChecks.bUpdated, eError));
+ goto chk_exit;
+ }
+ }
+
+ if (!*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT;
+ PVR_DPF((PVR_DBG_ERROR,"%s: Missing compatibility info from FW (%u)",
+ __FUNCTION__, eError));
+ goto chk_exit;
+ }
+#endif /* !defined(NO_HARDWARE) */
+
+ eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+ eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = PVRSRV_OK;
+chk_exit:
+#if !defined(NO_HARDWARE)
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+ return eError;
+}
+
+/**************************************************************************/ /*!
+@Function RGXSoftReset
+@Description Resets some modules of the RGX device
+@Input psDeviceNode Device node
+@Input ui64ResetValue1 A mask for which each bit set corresponds
+ to a module to reset (via the SOFT_RESET
+ register).
+@Input ui64ResetValue2 A mask for which each bit set corresponds
+ to a module to reset (via the SOFT_RESET2
+ register).
+@Return PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT64 ui64ResetValue1,
+ IMG_UINT64 ui64ResetValue2)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_BOOL bSoftReset = IMG_FALSE;
+ IMG_UINT64 ui64SoftResetMask = 0;
+
+ PVR_ASSERT(psDeviceNode != NULL);
+ PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+
+ /* the device info */
+ psDevInfo = psDeviceNode->pvDevice;
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBE2_IN_XE_BIT_MASK)
+ {
+ ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+ } else
+ {
+ ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
+ }
+
+ if((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) && \
+ ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
+ {
+ bSoftReset = IMG_TRUE;
+ }
+
+ if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Set in soft-reset */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+ }
+
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+ }
+
+ /* Take the modules out of reset... */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
+ }
+
+ /* ...and fence again */
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = hDbgReqestHandle;
+
+ /* Only action the request if we've fully init'ed */
+ if (psDevInfo->bDevInit2Done)
+ {
+ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+ }
+}
+
+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline =
+{
+#if defined(PDUMP)
+ .hPdumpPages = 0,
+#endif
+ .sPages = {{0}},
+ .sPhysAddr = {0}
+};
+
+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+ psDevInfo->sTrampoline.hPdumpPages,
+#endif
+ &psDevInfo->sTrampoline.sPages);
+ psDevInfo->sTrampoline = sNullTrampoline;
+}
+
+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ IMG_INT32 i, j;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_MIPS_ADDRESS_TRAMPOLINE asTrampoline[RGXMIPSFW_TRAMPOLINE_NUMPAGES];
+
+ PDUMPCOMMENT("Allocate pages for trampoline");
+
+ /* Retry the trampoline allocation, retaining any allocation that
+ * overlaps the target range until we get one that does not. At most
+ * three attempts are ever needed.
+ * The unused allocations are freed only after the desired range has
+ * been obtained, to prevent the allocator from handing back the same
+ * bad range repeatedly.
+ */
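+ /* Two same-sized ranges [x, x+size) and [y, y+size) overlap iff each
+ * one starts before the other ends, which is exactly what
+ * RANGES_OVERLAP below tests. */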
+ #define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size))
+ for (i = 0; i < 3; i++)
+ {
+ eError = DevPhysMemAlloc(psDeviceNode,
+ RGXMIPSFW_TRAMPOLINE_SIZE,
+ 0, // (init) u8Value
+ IMG_FALSE, // bInitPage,
+#if defined(PDUMP)
+ psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ "TrampolineRegion",
+ &asTrampoline[i].hPdumpPages,
+#endif
+ &asTrampoline[i].sPages,
+ &asTrampoline[i].sPhysAddr);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s failed (%u)",
+ __func__, eError));
+ goto fail;
+ }
+
+ if (!RANGES_OVERLAP(asTrampoline[i].sPhysAddr.uiAddr,
+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+ RGXMIPSFW_TRAMPOLINE_SIZE))
+ {
+ break;
+ }
+ }
+ if (RGXMIPSFW_TRAMPOLINE_NUMPAGES == i)
+ {
+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s failed to allocate non-overlapping pages (%u)",
+ __func__, eError));
+ goto fail;
+ }
+ #undef RANGES_OVERLAP
+
+ psDevInfo->sTrampoline = asTrampoline[i];
+
+fail:
+ /* free all unused allocations */
+ for (j = 0; j < i; j++)
+ {
+ DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+ asTrampoline[j].hPdumpPages,
+#endif
+ &asTrampoline[j].sPages);
+ }
+
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiFWCodeLen,
+ IMG_DEVMEM_SIZE_T uiFWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCorememLen,
+ PMR **ppsFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ PMR **ppsFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ PMR **ppsFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase)
+{
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitAllocFWImgMemKM: Failed RGXInitCreateFWKernelMemoryContext (%u)", eError));
+ goto failFWMemoryContextAlloc;
+ }
+
+ /*
+ * Set up Allocation for FW code section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+
+ eError = RGXAllocateFWCodeRegion(psDeviceNode,
+ uiFWCodeLen,
+ uiMemAllocFlags,
+ IMG_FALSE,
+ "FwExCodeRegion",
+ &psDevInfo->psRGXFWCodeMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw code mem (%u)",
+ eError));
+ goto failFWCodeMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCodeMemDesc, (void**) ppsFWCodePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWCodeMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+ &psDevInfo->sFWCodeDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw code mem (%u)",
+ eError));
+ goto failFWCodeMemDescAqDevVirt;
+ }
+ *psFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+
+ if (0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ /*
+ * The FW code must be the first allocation in the firmware heap, otherwise
+ * the bootloader will not work (META will not be able to find the bootloader).
+ */
+ PVR_ASSERT(psFWCodeDevVAddrBase->uiAddr == RGX_FIRMWARE_HEAP_BASE);
+ }
+
+ /*
+ * Set up Allocation for FW data section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate and export data memory for fw");
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ uiFWDataLen,
+ OSGetPageSize(),
+ uiMemAllocFlags,
+ "FwExDataRegion",
+ &psDevInfo->psRGXFWDataMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw data mem (%u)",
+ eError));
+ goto failFWDataMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWDataMemDesc, (void **) ppsFWDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWDataMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+ &psDevInfo->sFWDataDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw data mem (%u)",
+ eError));
+ goto failFWDataMemDescAqDevVirt;
+ }
+ *psFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ eError = RGXAllocTrampoline(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Failed to allocate trampoline region (%u)",
+ eError));
+ goto failTrampolineMemDescAlloc;
+ }
+ }
+
+ if (uiFWCorememLen != 0)
+ {
+ /*
+ * Set up Allocation for FW coremem section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ eError = RGXAllocateFWCodeRegion(psDeviceNode,
+ uiFWCorememLen,
+ uiMemAllocFlags,
+ IMG_TRUE,
+ "FwExCorememRegion",
+ &psDevInfo->psRGXFWCorememMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw coremem mem, size: %lld, flags: %x (%u)",
+ uiFWCorememLen, uiMemAllocFlags, eError));
+ goto failFWCorememMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCorememMemDesc, (void**) ppsFWCorememPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWCorememMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc,
+ &psDevInfo->sFWCorememCodeDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem mem (%u)",
+ eError));
+ goto failFWCorememMemDescAqDevVirt;
+ }
+
+ RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+ psDevInfo->psRGXFWCorememMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+ psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+ }
+
+ *psFWCorememDevVAddrBase = psDevInfo->sFWCorememCodeDevVAddrBase;
+ *psFWCorememMetaVAddrBase = psDevInfo->sFWCorememCodeFWAddr;
+
+ return PVRSRV_OK;
+
+failFWCorememMemDescAqDevVirt:
+ if (uiFWCorememLen != 0)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+ psDevInfo->psRGXFWCorememMemDesc = NULL;
+ }
+failFWCorememMemDescAlloc:
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ RGXFreeTrampoline(psDeviceNode);
+ }
+failTrampolineMemDescAlloc:
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+failFWDataMemDescAqDevVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+ psDevInfo->psRGXFWDataMemDesc = NULL;
+failFWDataMemDescAlloc:
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+failFWCodeMemDescAqDevVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+ psDevInfo->psRGXFWCodeMemDesc = NULL;
+failFWCodeMemDescAlloc:
+failFWMemoryContextAlloc:
+ return eError;
+}
+#endif /* defined(PVRSRV_GPUVIRT_GUESTDRV) */
+
+/*
+ AppHint parameter interface
+*/
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_ERROR eResult;
+
+ eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+ *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_ERROR eResult;
+
+ eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+ if (PVRSRV_OK == eResult)
+ {
+ if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+ {
+ *pui32Value = 2; /* Trace */
+ }
+ else if (*pui32Value & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ *pui32Value = 1; /* TBI */
+ }
+ else
+ {
+ *pui32Value = 0; /* None */
+ }
+ }
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eResult;
+ IMG_UINT32 ui32RGXFWLogType;
+
+ eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+ if (PVRSRV_OK == eResult)
+ {
+ if (ui32Value && 1 != ui32RGXFWLogType)
+ {
+ ui32Value |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+ eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32Value);
+ }
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eResult;
+ IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+ /* 0 - none, 1 - tbi, 2 - trace */
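+ /* Translate the AppHint value into FW log-type bits: keep the current
+ * group filter (defaulting to the MAIN group if none is set) and add
+ * the TRACE bit only for 'trace' (2); a value of 0 disables FW logging. */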
+ if (ui32Value)
+ {
+ eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+ if (PVRSRV_OK != eResult)
+ {
+ return eResult;
+ }
+ if (!ui32RGXFWLogType)
+ {
+ ui32RGXFWLogType = RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ if (2 == ui32Value)
+ {
+ ui32RGXFWLogType |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+ }
+
+ eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_BOOL *pbValue)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+ *pbValue = psDevInfo->bEnableFWPoisonOnFree;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_BOOL bValue)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+ psDevInfo->bEnableFWPoisonOnFree = bValue;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ *pui32Value = psDevInfo->ubFWPoisonOnFreeValue;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE) ui32Value;
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitFirmwareKM
+ */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ FW_PERF_CONF eFirmwarePerf)
+{
+ PVRSRV_ERROR eError;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+ IMG_BOOL bCompatibleAll=IMG_TRUE, bCompatibleVersion=IMG_TRUE, bCompatibleLenMax=IMG_TRUE, bCompatibleBNC=IMG_TRUE, bCompatibleV=IMG_TRUE;
+ IMG_UINT32 ui32NumBIFTilingConfigs, *pui32BIFTilingXStrides, i, ui32B, ui32V, ui32N, ui32C;
+ RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+ IMG_CHAR szV[8];
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+
+ /* Check if BVNC numbers of client and driver are compatible */
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+
+ RGX_BVNC_EQUAL(sBVNC, *psClientBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and client (%d).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psClientBVNC->ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and client (%d).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psClientBVNC->ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / client BNC (%d._.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / client BVNC (%d.%s.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and client BVNC (%d.%s.%d.%d) match. [ OK ]",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ }
+
+ PVRSRVSystemBIFTilingGetConfig(psDeviceNode->psDevConfig,
+ &eBIFTilingMode,
+ &ui32NumBIFTilingConfigs);
+ pui32BIFTilingXStrides = OSAllocMem(sizeof(IMG_UINT32) * ui32NumBIFTilingConfigs);
+ if(pui32BIFTilingXStrides == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: OSAllocMem failed (%u)", eError));
+ goto failed_BIF_tiling_alloc;
+ }
+ for(i = 0; i < ui32NumBIFTilingConfigs; i++)
+ {
+ eError = PVRSRVSystemBIFTilingHeapGetXStride(psDeviceNode->psDevConfig,
+ i+1,
+ &pui32BIFTilingXStrides[i]);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get BIF tiling X stride for heap %u (%u)",
+ __func__, i + 1, eError));
+ goto failed_BIF_heap_init;
+ }
+ }
+
+ eError = RGXSetupFirmware(psDeviceNode,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32HWPerfFWBufSizeKB,
+ ui64HWPerfFilter,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ ui32ConfigFlags,
+ ui32LogType,
+ eBIFTilingMode,
+ ui32NumBIFTilingConfigs,
+ pui32BIFTilingXStrides,
+ ui32FilterFlags,
+ ui32JonesDisableMask,
+ ui32HWRDebugDumpLimit,
+ ui32HWPerfCountersDataSize,
+ ppsHWPerfPMR,
+ psRGXFwInit,
+ eRGXRDPowerIslandingConf,
+ eFirmwarePerf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", eError));
+ goto failed_init_firmware;
+ }
+
+ OSFreeMem(pui32BIFTilingXStrides);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+ RGXFWTraceQueryFilter,
+ RGXFWTraceSetFilter,
+ psDeviceNode,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+ RGXFWTraceQueryLogType,
+ RGXFWTraceSetLogType,
+ psDeviceNode,
+ NULL);
+
+ /* FW Poison values are not passed through from the init code
+ * so grab them here */
+ OSCreateKMAppHintState(&pvAppHintState);
+
+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+ OSGetKMAppHintBOOL(pvAppHintState,
+ EnableFWPoisonOnFree,
+ &ui32AppHintDefault,
+ &psDevInfo->bEnableFWPoisonOnFree);
+
+ ui32AppHintDefault = PVRSRV_APPHINT_FWPOISONONFREEVALUE;
+ OSGetKMAppHintUINT32(pvAppHintState,
+ FWPoisonOnFreeValue,
+ &ui32AppHintDefault,
+ (IMG_UINT32*)&psDevInfo->ubFWPoisonOnFreeValue);
+
+ OSFreeKMAppHintState(pvAppHintState);
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+ RGXQueryFWPoisonOnFree,
+ RGXSetFWPoisonOnFree,
+ psDeviceNode,
+ NULL);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FWPoisonOnFreeValue,
+ RGXQueryFWPoisonOnFreeValue,
+ RGXSetFWPoisonOnFreeValue,
+ psDeviceNode,
+ NULL);
+
+ return PVRSRV_OK;
+
+failed_init_firmware:
+failed_BIF_heap_init:
+ OSFreeMem(pui32BIFTilingXStrides);
+failed_BIF_tiling_alloc:
+failed_to_pass_compatibility_check:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ * PVRSRVRGXInitFirmwareExtendedKM
+ */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareExtendedKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ PMR **ppsHWPerfPMR,
+ RGX_FW_INIT_IN_PARAMS *psInParams)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+ RGXFWIF_COMPCHECKS_BVNC *psFirmwareBVNC = &(psInParams->sFirmwareBVNC);
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_CHAR szV[8];
+
+ OSSNPrintf(szV, sizeof(szV),"%d",psDevInfo->sDevFeatureCfg.ui32V);
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, psDevInfo->sDevFeatureCfg.ui32B, szV, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+ /* Check if BVNC numbers of firmware and driver are compatible */
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ RGX_BVNC_EQUAL(sBVNC, *psFirmwareBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+#else
+ bCompatibleAll = IMG_TRUE;
+#endif
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psFirmwareBVNC->ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psFirmwareBVNC->ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / firmware BNC (%d._.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / firmware BVNC (%d.%s.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and firmware BVNC (%d.%s.%d.%d) match. [ OK ]",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ }
+
+ eError = PVRSRVRGXInitFirmwareKM(psConnection,
+ psDeviceNode,
+ psRGXFwInit,
+ psInParams->bEnableSignatureChecks,
+ psInParams->ui32SignatureChecksBufSize,
+ psInParams->ui32HWPerfFWBufSizeKB,
+ psInParams->ui64HWPerfFilter,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ psInParams->ui32ConfigFlags,
+ psInParams->ui32LogType,
+ psInParams->ui32FilterFlags,
+ psInParams->ui32JonesDisableMask,
+ psInParams->ui32HWRDebugDumpLimit,
+ &(psInParams->sClientBVNC),
+ psInParams->ui32HWPerfCountersDataSize,
+ ppsHWPerfPMR,
+ psInParams->eRGXRDPowerIslandingConf,
+ psInParams->eFirmwarePerf);
+ return eError;
+
+failed_to_pass_compatibility_check:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC **psMemDesc,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+ IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ /* Size and align are 'expanded' because we request an Exportalign allocation */
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+ &uiUFOBlockSize,
+ &ui32UFOBlockAlign);
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ uiUFOBlockSize,
+ ui32UFOBlockAlign,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_CACHE_COHERENT |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwExUFOBlock",
+ psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
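+ /* The UFO block is referenced by the FW, so return its FW virtual
+ * address and the (adjusted) block size to the caller. */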
+ RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+ *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+ return PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psMemDesc)
+{
+ /*
+ If the system snoops the device cache then the UFO block might still
+ be in the cache, so we need to flush it out before freeing the memory.
+
+ When the device is being shut down/destroyed we no longer care:
+ several data structures needed to issue a flush have already been
+ destroyed.
+ */
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+ psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+ {
+ RGXFWIF_KCCB_CMD sFlushInvalCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+ eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sFlushInvalCmd,
+ sizeof(sFlushInvalCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: SLC flush and invalidate aborted with error (%u)", eError));
+ }
+ }
+ }
+
+ RGXUnsetFirmwareAddress(psMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psMemDesc);
+}
+
+/*
+ DevDeInitRGX
+*/
+PVRSRV_ERROR DevDeInitRGX (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ IMG_UINT32 ui32Temp=0;
+ if (!psDevInfo)
+ {
+ /* Can happen if DevInitRGX failed */
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Null DevInfo"));
+ return PVRSRV_OK;
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+ PVR_UNREFERENCED_PARAMETER(ui32Temp);
+ }
+ else
+#endif
+ {
+ /*Delete the Dummy page related info */
+ ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+ if(0 != ui32Temp)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Dummy page reference counter is non zero", __func__));
+ PVR_ASSERT(0);
+ }
+ }
+#if defined(PDUMP)
+ if(NULL != psDeviceNode->sDummyPage.hPdumpDummyPg)
+ {
+ PDUMPCOMMENT("Error dummy page handle is still active");
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ OSDisableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+ OSRemoveTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+#endif
+
+ /* The lock type needs to be a dispatch-type lock here because it can be acquired from the MISR (Z-buffer) path */
+ OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+
+ /* Unregister debug request notifiers first as they could depend on anything. */
+ if (psDevInfo->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify);
+ }
+
+ /* Cancel notifications to this device */
+ PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+ psDeviceNode->hCmdCompNotify = NULL;
+
+ /*
+ * De-initialise in reverse order, so stage 2 init is undone first.
+ */
+ if (psDevInfo->bDevInit2Done)
+ {
+ psDevInfo->bDevInit2Done = IMG_FALSE;
+
+#if !defined(NO_HARDWARE)
+ (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+ (void) OSUninstallMISR(psDevInfo->pvMISRData);
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+#endif /* !NO_HARDWARE */
+
+ /* Remove the device from the power manager */
+ eError = PVRSRVRemovePowerDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSLockDestroy(psDevInfo->hGPUUtilLock);
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Free DVFS Table */
+ if (psDevInfo->psGpuDVFSTable != NULL)
+ {
+ OSFreeMem(psDevInfo->psGpuDVFSTable);
+ psDevInfo->psGpuDVFSTable = NULL;
+ }
+#endif
+
+ /* De-init Freelists/ZBuffers... */
+ OSLockDestroy(psDevInfo->hLockFreeList);
+ OSLockDestroy(psDevInfo->hLockZSBuffer);
+
+ /* Unregister MMU related stuff */
+ eError = RGXMMUInit_Unregister(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", eError));
+ return eError;
+ }
+
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ /* Unregister MMU related stuff */
+ eError = RGXMipsMMUInit_Unregister(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", eError));
+ return eError;
+ }
+ }
+ }
+
+ /* UnMap Regs */
+ if (psDevInfo->pvRegsBaseKM != NULL)
+ {
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+#endif /* !NO_HARDWARE */
+ psDevInfo->pvRegsBaseKM = NULL;
+ }
+
+#if 0 /* not required at this time */
+ if (psDevInfo->hTimer)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed to remove timer"));
+ return eError;
+ }
+ psDevInfo->hTimer = NULL;
+ }
+#endif
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+ RGXDeInitHeaps(psDevMemoryInfo);
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ if (psDevInfo->psRGXFWCodeMemDesc)
+ {
+ /* Free fw code */
+ PDUMPCOMMENT("Freeing FW code memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+ psDevInfo->psRGXFWCodeMemDesc = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"No firmware code memory to free"));
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ if (psDevInfo->sTrampoline.sPages.u.pvHandle)
+ {
+ /* Free trampoline region */
+ PDUMPCOMMENT("Freeing trampoline memory");
+ RGXFreeTrampoline(psDeviceNode);
+ }
+ }
+
+ if (psDevInfo->psRGXFWDataMemDesc)
+ {
+ /* Free fw data */
+ PDUMPCOMMENT("Freeing FW data memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+ psDevInfo->psRGXFWDataMemDesc = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"No firmware data memory to free"));
+ }
+
+ if (psDevInfo->psRGXFWCorememMemDesc)
+ {
+ /* Free fw data */
+ PDUMPCOMMENT("Freeing FW coremem memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+ psDevInfo->psRGXFWCorememMemDesc = NULL;
+ }
+#endif
+
+ /*
+ Free the firmware allocations.
+ */
+ RGXFreeFirmware(psDevInfo);
+ RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+ /* De-initialise non-device specific (TL) users of RGX device memory */
+ RGXHWPerfHostDeInit();
+ eError = HTBDeInit();
+ PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+ /* destroy the context list locks */
+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+ OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+
+
+ if ((psDevInfo->hNMILock != NULL) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ OSLockDestroy(psDevInfo->hNMILock);
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if (psDevInfo->hDebugFaultInfoLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+ }
+ if (psDevInfo->hMMUCtxUnregLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+ }
+#endif
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Free the init scripts. */
+ OSFreeMem(psDevInfo->psScripts);
+#endif
+
+ /* Free device BVNC string */
+ if(NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+ }
+
+ /* DeAllocate devinfo */
+ OSFreeMem(psDevInfo);
+
+ psDeviceNode->pvDevice = NULL;
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+ IMG_UINT64 heap_base,
+ IMG_DEVMEM_SIZE_T heap_length,
+ IMG_UINT32 log2_import_alignment,
+ IMG_UINT32 tiling_mode)
+{
+ DEVMEM_HEAP_BLUEPRINT b = {
+ .pszName = name,
+ .sHeapBaseAddr.uiAddr = heap_base,
+ .uiHeapLength = heap_length,
+ .uiLog2DataPageSize = RGXHeapDerivePageSize(OSGetPageShift()),
+ .uiLog2ImportAlignment = log2_import_alignment,
+ .uiLog2TilingStrideFactor = (RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE - tiling_mode)
+ };
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+ IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+ if (!OSStringCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT))
+ {
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+ &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+ switch (ui32GeneralNon4KHeapPageSize)
+ {
+ case (1<<RGX_HEAP_4KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+ break;
+ case (1<<RGX_HEAP_16KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+ break;
+ case (1<<RGX_HEAP_64KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+ break;
+ case (1<<RGX_HEAP_256KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+ break;
+ case (1<<RGX_HEAP_1MB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+ break;
+ case (1<<RGX_HEAP_2MB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"Invalid AppHint GeneralAltHeapPageSize [%d] value, using 4KB",
+ ui32AppHintDefault));
+ break;
+ }
+ OSFreeKMAppHintState(pvAppHintState);
+ }
+
+ return b;
+}
+
+#define INIT_HEAP(NAME) \
+do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_ ## NAME ## _HEAP_IDENT, \
+ RGX_ ## NAME ## _HEAP_BASE, \
+ RGX_ ## NAME ## _HEAP_SIZE, \
+ 0, 0); \
+ psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ STR, \
+ RGX_ ## NAME ## _HEAP_BASE, \
+ RGX_ ## NAME ## _HEAP_SIZE, \
+ 0, 0); \
+ psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_TILING_HEAP(D, N, M) \
+do { \
+ IMG_UINT32 xstride; \
+ PVRSRVSystemBIFTilingHeapGetXStride((D)->psDeviceNode->psDevConfig, N, &xstride); \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+ RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+ RGX_BIF_TILING_HEAP_SIZE, \
+ RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride), \
+ M); \
+ psDeviceMemoryHeapCursor++; \
+} while (0)
+
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVICE_MEMORY_INFO *psNewMemoryInfo,
+ IMG_UINT32 *pui32DummyPgSize)
+{
+ IMG_UINT64 ui64ErnsBrns;
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+ IMG_UINT32 uiTilingMode;
+ IMG_UINT32 uiNumHeaps;
+
+ ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+ /* FIXME - consider whether this ought not to be on the device node itself */
+ psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID);
+ if(psNewMemoryInfo->psDeviceMemoryHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT"));
+ goto e0;
+ }
+
+ PVRSRVSystemBIFTilingGetConfig(psDevInfo->psDeviceNode->psDevConfig, &uiTilingMode, &uiNumHeaps);
+
+ /* Calculate the dummy page size, which is the maximum page size
+ * supported by heaps that can have sparse allocations.
+ *
+ * For now the heaps that can have sparse allocations are the general
+ * and Doppler heaps. Doppler allocations do not have to be backed by
+ * the dummy page (even when their 2MB page size is supported in
+ * future), so the general heap page size is used as the reference. */
+ *pui32DummyPgSize = RGXHeapDerivePageSize(OSGetPageShift());
+
+ /* Initialise the heaps */
+ psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+ INIT_HEAP(GENERAL_SVM);
+ INIT_HEAP(GENERAL);
+
+ if (ui64ErnsBrns & FIX_HW_BRN_63142_BIT_MASK)
+ {
+ /* BRN63142 heap must be at the top of an aligned 16GB range. */
+ INIT_HEAP(RGNHDR_BRN_63142);
+ PVR_ASSERT((RGX_RGNHDR_BRN_63142_HEAP_BASE & IMG_UINT64_C(0x3FFFFFFFF)) +
+ RGX_RGNHDR_BRN_63142_HEAP_SIZE == IMG_UINT64_C(0x400000000));
+ }
+
+ INIT_HEAP(GENERAL_NON4K);
+ INIT_HEAP(VISTEST);
+
+ if (ui64ErnsBrns & FIX_HW_BRN_52402_BIT_MASK)
+ {
+ INIT_HEAP_NAME("PDS Code and Data", PDSCODEDATA_BRN_52402);
+ INIT_HEAP_NAME("USC Code", USCCODE_BRN_52402);
+ }
+ else
+ {
+ INIT_HEAP(PDSCODEDATA);
+ INIT_HEAP(USCCODE);
+ }
+
+ if (ui64ErnsBrns & (FIX_HW_BRN_52402_BIT_MASK | FIX_HW_BRN_55091_BIT_MASK))
+ {
+ INIT_HEAP_NAME("TQ3DParameters", TQ3DPARAMETERS_BRN_52402_55091);
+ }
+ else
+ {
+ INIT_HEAP(TQ3DPARAMETERS);
+ }
+
+ INIT_TILING_HEAP(psDevInfo, 1, uiTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 2, uiTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 3, uiTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 4, uiTilingMode);
+ INIT_HEAP(DOPPLER);
+ INIT_HEAP(DOPPLER_OVERFLOW);
+ INIT_HEAP(TDM_TPU_YUV_COEFFS);
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+ {
+ INIT_HEAP(SERVICES_SIGNALS);
+ INIT_HEAP(SIGNALS);
+ }
+ INIT_HEAP_NAME("HWBRN37200", HWBRN37200);
+ INIT_HEAP_NAME("Firmware", FIRMWARE);
+
+ /* set the heap count */
+ psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID);
+
+ /* Set up two heap configs: one for META only, containing just the
+ firmware heap, and one for clients only, containing all the other
+ heaps. */
+
+ psNewMemoryInfo->uiNumHeapConfigs = 2;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+ if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG"));
+ goto e1;
+ }
+
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount-1;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
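+ /* The firmware heap blueprints were appended last, so the firmware
+ * config points at the final entry behind the cursor (or the final two
+ * when the HWBRN37200 heap is also needed). */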
+ if(ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+ {
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 2;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-2;
+ } else
+ {
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 1;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-1;
+ }
+
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ if (RGXVzInitHeaps(psNewMemoryInfo, psDeviceMemoryHeapCursor) != PVRSRV_OK)
+ {
+ goto e1;
+ }
+#endif
+
+ return PVRSRV_OK;
+e1:
+ OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+e0:
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+#if defined(SUPPORT_PVRSRV_GPUVIRT)
+ RGXVzDeInitHeaps(psDevMemoryInfo);
+#endif
+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+ /* Binary-search a table of rows (uiRowCount 64-bit words per row), sorted in ascending order of each row's first word, for a row whose first word equals ui64SearchValue; returns a pointer to that row, or NULL if no match is found */
+static void *RGXSearchTable( IMG_UINT64 *pui64Array,
+ IMG_UINT uiEnd,
+ IMG_UINT64 ui64SearchValue,
+ IMG_UINT uiRowCount)
+{
+ IMG_UINT uiStart = 0, index;
+ IMG_UINT64 value, *pui64Ptr = NULL;
+
+ while(uiStart < uiEnd)
+ {
+ index = (uiStart + uiEnd)/2;
+ pui64Ptr = pui64Array + (index * uiRowCount);
+ value = *(pui64Ptr);
+
+ if(value == ui64SearchValue)
+ {
+ return (void *)pui64Ptr;
+ }
+
+ if(value > ui64SearchValue)
+ {
+ uiEnd = index;
+ } else
+ {
+ uiStart = index + 1;
+ }
+ }
+ return NULL;
+}
+
+#if defined(DEBUG)
+static void RGXDumpParsedBVNCConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ IMG_UINT64 ui64Temp = 0, ui64Temp2 = 1;
+
+ PVR_LOG(( "NC: %d", psDevInfo->sDevFeatureCfg.ui32NumClusters));
+ PVR_LOG(( "CSF: %d", psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat));
+ PVR_LOG(( "FBCDCA: %d", psDevInfo->sDevFeatureCfg.ui32FBCDCArch));
+ PVR_LOG(( "MCMB: %d", psDevInfo->sDevFeatureCfg.ui32MCMB));
+ PVR_LOG(( "MCMS: %d", psDevInfo->sDevFeatureCfg.ui32MCMS));
+ PVR_LOG(( "MDMACnt: %d", psDevInfo->sDevFeatureCfg.ui32MDMACount));
+ PVR_LOG(( "NIIP: %d", psDevInfo->sDevFeatureCfg.ui32NIIP));
+ PVR_LOG(( "PBW: %d", psDevInfo->sDevFeatureCfg.ui32PBW));
+ PVR_LOG(( "STEArch: %d", psDevInfo->sDevFeatureCfg.ui32STEArch));
+ PVR_LOG(( "SVCEA: %d", psDevInfo->sDevFeatureCfg.ui32SVCE));
+ PVR_LOG(( "SLCBanks: %d", psDevInfo->sDevFeatureCfg.ui32SLCBanks));
+ PVR_LOG(( "SLCCLS: %d", psDevInfo->sDevFeatureCfg.ui32CacheLineSize));
+ PVR_LOG(( "SLCSize: %d", psDevInfo->sDevFeatureCfg.ui32SLCSize));
+ PVR_LOG(( "VASB: %d", psDevInfo->sDevFeatureCfg.ui32VASB));
+ PVR_LOG(( "META: %d", psDevInfo->sDevFeatureCfg.ui32META));
+
+ /* Dump the features with no values */
+ ui64Temp = psDevInfo->sDevFeatureCfg.ui64Features;
+ while(ui64Temp)
+ {
+ if(ui64Temp & 0x01)
+ {
+ IMG_PCHAR psString = "Unknown feature, debug list should be updated....";
+ switch(ui64Temp2)
+ {
+ case RGX_FEATURE_AXI_ACELITE_BIT_MASK: psString = "RGX_FEATURE_AXI_ACELITE";break;
+ case RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK:
+ psString = "RGX_FEATURE_CLUSTER_GROUPING";break;
+ case RGX_FEATURE_COMPUTE_BIT_MASK:
+ psString = "RGX_FEATURE_COMPUTE";break;
+ case RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK:
+ psString = "RGX_FEATURE_COMPUTE_MORTON_CAPABLE";break;
+ case RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK:
+ psString = "RGX_FEATURE_COMPUTE_OVERLAP";break;
+ case RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK:
+ psString = "RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS"; break;
+ case RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK:
+ psString = "RGX_FEATURE_DYNAMIC_DUST_POWER";break;
+ case RGX_FEATURE_FASTRENDER_DM_BIT_MASK:
+ psString = "RGX_FEATURE_FASTRENDER_DM";break;
+ case RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK:
+ psString = "RGX_FEATURE_GPU_CPU_COHERENCY";break;
+ case RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK:
+ psString = "RGX_FEATURE_GPU_VIRTUALISATION";break;
+ case RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK:
+ psString = "RGX_FEATURE_GS_RTA_SUPPORT";break;
+ case RGX_FEATURE_META_DMA_BIT_MASK:
+ psString = "RGX_FEATURE_META_DMA";break;
+ case RGX_FEATURE_MIPS_BIT_MASK:
+ psString = "RGX_FEATURE_MIPS";break;
+ case RGX_FEATURE_PBE2_IN_XE_BIT_MASK:
+ psString = "RGX_FEATURE_PBE2_IN_XE";break;
+ case RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK:
+ psString = "RGX_FEATURE_PBVNC_COREID_REG";break;
+ case RGX_FEATURE_PDS_PER_DUST_BIT_MASK:
+ psString = "RGX_FEATURE_PDS_PER_DUST";break;
+ case RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK:
+ psString = "RGX_FEATURE_PDS_TEMPSIZE8";break;
+ case RGX_FEATURE_PERFBUS_BIT_MASK:
+ psString = "RGX_FEATURE_PERFBUS";break;
+ case RGX_FEATURE_RAY_TRACING_BIT_MASK:
+ psString = "RGX_FEATURE_RAY_TRACING";break;
+ case RGX_FEATURE_ROGUEXE_BIT_MASK:
+ psString = "RGX_FEATURE_ROGUEXE";break;
+ case RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK:
+ psString = "RGX_FEATURE_S7_CACHE_HIERARCHY";break;
+ case RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK:
+ psString = "RGX_FEATURE_S7_TOP_INFRASTRUCTURE";break;
+ case RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK:
+ psString = "RGX_FEATURE_SCALABLE_VDM_GPP";break;
+ case RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK:
+ psString = "RGX_FEATURE_SIGNAL_SNOOPING";break;
+ case RGX_FEATURE_SINGLE_BIF_BIT_MASK:
+ psString = "RGX_FEATURE_SINGLE_BIF";break;
+ case RGX_FEATURE_SLCSIZE8_BIT_MASK:
+ psString = "RGX_FEATURE_SLCSIZE8";break;
+ case RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK:
+ psString = "RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128"; break;
+ case RGX_FEATURE_SLC_VIVT_BIT_MASK:
+ psString = "RGX_FEATURE_SLC_VIVT";break;
+ case RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK:
+ psString = "RGX_FEATURE_SYS_BUS_SECURE_RESET"; break;
+ case RGX_FEATURE_TESSELLATION_BIT_MASK:
+ psString = "RGX_FEATURE_TESSELLATION";break;
+ case RGX_FEATURE_TLA_BIT_MASK:
+ psString = "RGX_FEATURE_TLA";break;
+ case RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK:
+ psString = "RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS";break;
+ case RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK:
+ psString = "RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS";break;
+ case RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK:
+ psString = "RGX_FEATURE_TPU_FILTERING_MODE_CONTROL";break;
+ case RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK:
+ psString = "RGX_FEATURE_VDM_DRAWINDIRECT";break;
+ case RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK:
+ psString = "RGX_FEATURE_VDM_OBJECT_LEVEL_LLS";break;
+ case RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK:
+ psString = "RGX_FEATURE_XT_TOP_INFRASTRUCTURE";break;
+
+
+ default: PVR_DPF((PVR_DBG_WARNING, "Feature with mask does not exist: 0x%016llx", ui64Temp2));
+ break;
+ }
+ PVR_LOG(("%s", psString));
+ }
+ ui64Temp >>= 1;
+ ui64Temp2 <<= 1;
+ }
+
+ /*Dump the ERN and BRN flags for this core */
+ ui64Temp = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+ ui64Temp2 = 1;
+
+ while(ui64Temp)
+ {
+ if(ui64Temp & 0x1)
+ {
+ IMG_UINT32 ui32ErnBrnId = 0;
+ switch(ui64Temp2)
+ {
+ case HW_ERN_36400_BIT_MASK: ui32ErnBrnId = 36400; break;
+ case FIX_HW_BRN_37200_BIT_MASK: ui32ErnBrnId = 37200; break;
+ case FIX_HW_BRN_37918_BIT_MASK: ui32ErnBrnId = 37918; break;
+ case FIX_HW_BRN_38344_BIT_MASK: ui32ErnBrnId = 38344; break;
+ case HW_ERN_41805_BIT_MASK: ui32ErnBrnId = 41805; break;
+ case HW_ERN_42290_BIT_MASK: ui32ErnBrnId = 42290; break;
+ case FIX_HW_BRN_42321_BIT_MASK: ui32ErnBrnId = 42321; break;
+ case FIX_HW_BRN_42480_BIT_MASK: ui32ErnBrnId = 42480; break;
+ case HW_ERN_42606_BIT_MASK: ui32ErnBrnId = 42606; break;
+ case FIX_HW_BRN_43276_BIT_MASK: ui32ErnBrnId = 43276; break;
+ case FIX_HW_BRN_44455_BIT_MASK: ui32ErnBrnId = 44455; break;
+ case FIX_HW_BRN_44871_BIT_MASK: ui32ErnBrnId = 44871; break;
+ case HW_ERN_44885_BIT_MASK: ui32ErnBrnId = 44885; break;
+ case HW_ERN_45914_BIT_MASK: ui32ErnBrnId = 45914; break;
+ case HW_ERN_46066_BIT_MASK: ui32ErnBrnId = 46066; break;
+ case HW_ERN_47025_BIT_MASK: ui32ErnBrnId = 47025; break;
+ case HW_ERN_49144_BIT_MASK: ui32ErnBrnId = 49144; break;
+ case HW_ERN_50539_BIT_MASK: ui32ErnBrnId = 50539; break;
+ case FIX_HW_BRN_50767_BIT_MASK: ui32ErnBrnId = 50767; break;
+ case FIX_HW_BRN_51281_BIT_MASK: ui32ErnBrnId = 51281; break;
+ case HW_ERN_51468_BIT_MASK: ui32ErnBrnId = 51468; break;
+ case FIX_HW_BRN_52402_BIT_MASK: ui32ErnBrnId = 52402; break;
+ case FIX_HW_BRN_52563_BIT_MASK: ui32ErnBrnId = 52563; break;
+ case FIX_HW_BRN_54141_BIT_MASK: ui32ErnBrnId = 54141; break;
+ case FIX_HW_BRN_54441_BIT_MASK: ui32ErnBrnId = 54441; break;
+ case FIX_HW_BRN_55091_BIT_MASK: ui32ErnBrnId = 55091; break;
+ case FIX_HW_BRN_57193_BIT_MASK: ui32ErnBrnId = 57193; break;
+ case HW_ERN_57596_BIT_MASK: ui32ErnBrnId = 57596; break;
+ case FIX_HW_BRN_60084_BIT_MASK: ui32ErnBrnId = 60084; break;
+ case HW_ERN_61389_BIT_MASK: ui32ErnBrnId = 61389; break;
+ case FIX_HW_BRN_61450_BIT_MASK: ui32ErnBrnId = 61450; break;
+ case FIX_HW_BRN_62204_BIT_MASK: ui32ErnBrnId = 62204; break;
+ default:
+ PVR_LOG(("Unknown ErnBrn bit: 0x%0llx", ui64Temp2));
+ break;
+ }
+ PVR_LOG(("ERN/BRN : %d",ui32ErnBrnId));
+ }
+ ui64Temp >>= 1;
+ ui64Temp2 <<= 1;
+ }
+
+}
+#endif
+
+static void RGXConfigFeaturesWithValues(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
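+ /* Derive the maximum data master (DM) and MTS counts, plus the maximum
+ * dust count, from the feature configuration parsed above */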
+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_MIN_CNT;
+ psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount = RGXFWIF_DM_MIN_MTS_CNT;
+
+ /* ui64Features must be already initialized */
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount += RGXFWIF_RAY_TRACING_DM_CNT;
+ psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount += RGXFWIF_RAY_TRACING_DM_MTS_CNT;
+ }
+
+ /* Get the max number of dusts in the core */
+ if(0 != psDevInfo->sDevFeatureCfg.ui32NumClusters)
+ {
+ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters / 2)) ;
+ }
+}
+
+static inline
+IMG_UINT32 GetFeatureValue(IMG_UINT64 ui64CfgInfo,
+ IMG_PCHAR pcFeature,
+ IMG_PUINT32 pui32FeatureValList,
+ IMG_UINT64 ui64FeatureMask,
+ IMG_UINT32 ui32FeaturePos,
+ IMG_UINT32 ui32FeatureMaxValue)
+{
+ IMG_UINT64 ui64Indx = 0;
+ IMG_UINT32 uiValue = 0;
+ ui64Indx = (ui64CfgInfo & ui64FeatureMask) >> ui32FeaturePos;
+ if(ui64Indx < ui32FeatureMaxValue)
+ {
+ uiValue = pui32FeatureValList[ui64Indx];
+ }else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Array out of bounds access attempted %s", pcFeature));
+ }
+ return uiValue;
+}
+
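+/* Helper macro: token-pastes the feature name to form its _BIT_MASK and _POS
+ * identifiers and the value list's _MAX_VALUE bound before calling GetFeatureValue() */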
+#define GET_FEAT_VALUE(CfgInfo, Feature, FeatureValList) \
+ GetFeatureValue(CfgInfo, #Feature, (IMG_PUINT32)FeatureValList, \
+ Feature##_BIT_MASK, Feature##_POS, FeatureValList##_MAX_VALUE)
+
+static void RGXParseBVNCFeatures(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64CfgInfo)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ /* Get the SLC size; the table stores the value in kilobytes, convert it to bytes */
+ psDevInfo->sDevFeatureCfg.ui32SLCSize = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_SIZE_IN_BYTES, SLCSKB) * 1024;
+
+ /*Get the control stream format architecture info */
+ psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT, CSF);
+
+ psDevInfo->sDevFeatureCfg.ui32FBCDCArch = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_FBCDC_ARCHITECTURE, FBCDCArch);
+
+ psDevInfo->sDevFeatureCfg.ui32MCMB = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_COREMEM_BANKS, MCRMB);
+
+ psDevInfo->sDevFeatureCfg.ui32MCMS = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_COREMEM_SIZE, MCRMS) *1024;
+
+ psDevInfo->sDevFeatureCfg.ui32MDMACount = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_DMA_CHANNEL_COUNT, MDCC);
+
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ psDevInfo->sDevFeatureCfg.ui32META = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META, META);
+ }else
+ {
+ psDevInfo->sDevFeatureCfg.ui32META = 0;
+ }
+
+ psDevInfo->sDevFeatureCfg.ui32NumClusters = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_NUM_CLUSTERS, NC);
+
+ psDevInfo->sDevFeatureCfg.ui32NIIP = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_NUM_ISP_IPP_PIPES, NIIP);
+
+ psDevInfo->sDevFeatureCfg.ui32PBW = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_PHYS_BUS_WIDTH, PBW);
+
+ psDevInfo->sDevFeatureCfg.ui32SLCBanks = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_BANKS, SLCB);
+
+ psDevInfo->sDevFeatureCfg.ui32CacheLineSize = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS, SLCCLSb);
+
+ psDevInfo->sDevFeatureCfg.ui32STEArch = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SCALABLE_TE_ARCH, STEA);
+
+ psDevInfo->sDevFeatureCfg.ui32SVCE = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SCALABLE_VCE, SVCEA);
+
+ psDevInfo->sDevFeatureCfg.ui32VASB = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS, VASB);
+
+ RGXConfigFeaturesWithValues(psDeviceNode);
+}
+#undef GET_FEAT_VALUE
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+static void RGXAcquireBVNCAppHint(IMG_CHAR *pszBVNCAppHint,
+ IMG_CHAR **apszRGXBVNCList,
+ IMG_UINT32 ui32BVNCListCount,
+ IMG_UINT32 *pui32BVNCCount)
+{
+ IMG_CHAR *pszAppHintDefault = NULL;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32BVNCIndex = 0;
+
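+ /* Read the comma-separated RGXBVNC apphint and split it in place into one
+ * string per device, up to ui32BVNCListCount entries */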
+ OSCreateKMAppHintState(&pvAppHintState);
+ pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC;
+ if (!OSGetKMAppHintSTRING(pvAppHintState,
+ RGXBVNC,
+ &pszAppHintDefault,
+ pszBVNCAppHint,
+ RGXBVNC_BUFFER_SIZE))
+ {
+ /* Release the apphint state on this early-exit path too */
+ OSFreeKMAppHintState(pvAppHintState);
+ *pui32BVNCCount = 0;
+ return;
+ }
+ OSFreeKMAppHintState(pvAppHintState);
+
+ while (*pszBVNCAppHint != '\0')
+ {
+ if (ui32BVNCIndex >= ui32BVNCListCount)
+ {
+ break;
+ }
+ apszRGXBVNCList[ui32BVNCIndex++] = pszBVNCAppHint;
+ while (1)
+ {
+ if (*pszBVNCAppHint == ',')
+ {
+ pszBVNCAppHint[0] = '\0';
+ pszBVNCAppHint++;
+ break;
+ } else if (*pszBVNCAppHint == '\0')
+ {
+ break;
+ }
+ pszBVNCAppHint++;
+ }
+ }
+ *pui32BVNCCount = ui32BVNCIndex;
+}
+#endif
+
+/* Parses the BVNC list passed as a module parameter */
+static PVRSRV_ERROR RGXParseBVNCList(IMG_UINT64 *pB,
+ IMG_UINT64 *pV,
+ IMG_UINT64 *pN,
+ IMG_UINT64 *pC,
+ const IMG_UINT32 ui32RGXDevCount)
+{
+ unsigned int ui32ScanCount = 0;
+ IMG_CHAR *pszBVNCString = NULL;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if (ui32RGXDevCount == 0) {
+ IMG_CHAR pszBVNCAppHint[RGXBVNC_BUFFER_SIZE] = {};
+ RGXAcquireBVNCAppHint(pszBVNCAppHint, gazRGXBVNCList, PVRSRV_MAX_DEVICES, &gui32RGXLoadTimeDevCount);
+ }
+#endif
+
+ /* The four components of a BVNC string are B, V, N & C */
+#define RGX_BVNC_INFO_PARAMS (4)
+
+ /* If only one BVNC parameter is specified, the same value is applied to all
+ * RGX devices detected */
+ if(1 == gui32RGXLoadTimeDevCount)
+ {
+ pszBVNCString = gazRGXBVNCList[0];
+ }else
+ {
+
+#if defined(DEBUG)
+ int i =0;
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: No. of BVNC module params : %u", __func__, gui32RGXLoadTimeDevCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list ... ",__func__));
+ for(i=0; i < gui32RGXLoadTimeDevCount; i++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s, ", gazRGXBVNCList[i]));
+ }
+#endif
+
+ if (gui32RGXLoadTimeDevCount == 0)
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+
+ /* The index of the RGX device being probed must fall within the
+ * list of BVNC module parameters supplied */
+ if(ui32RGXDevCount < gui32RGXLoadTimeDevCount)
+ {
+ pszBVNCString = gazRGXBVNCList[ui32RGXDevCount];
+ }else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than "
+ "number of actual devices", __func__));
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ }
+
+ if(NULL == pszBVNCString)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+
+ /* Parse the given RGX_BVNC string */
+ ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llu.%llu.%llu", pB, pV, pN, pC);
+ if(RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+ {
+ ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llup.%llu.%llu", pB, pV, pN, pC);
+ }
+ if(RGX_BVNC_INFO_PARAMS == ui32ScanCount)
+ {
+ PVR_LOG(("BVNC module parameter honoured: %s", pszBVNCString));
+ }else
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* This function detects the Rogue variant and configures the
+ * essential config info associated with such a device.
+ * The config info includes features, errata, etc. */
+static PVRSRV_ERROR RGXGetBVNCConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ static IMG_UINT32 ui32RGXDevCnt = 0;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bDetectBVNC = IMG_TRUE;
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT64 ui64BVNC, *pui64Cfg, B=0, V=0, N=0, C=0;
+
+ /*Order of BVNC rules
+ 1. RGX_BVNC Module parameter
+ 2. Detected BVNC (Hardware) / Compiled BVNC (No Hardware)
+ 3. If none of above report failure */
+
+ /* Check for load time RGX BVNC config */
+ eError = RGXParseBVNCList(&B,&V,&N,&C, ui32RGXDevCnt);
+ if(PVRSRV_OK == eError)
+ {
+ bDetectBVNC = IMG_FALSE;
+ }
+
+ /* If the BVNC is not specified as a module parameter, or the specified BVNC
+ * list is insufficient, try to detect the device */
+ if(IMG_TRUE == bDetectBVNC)
+ {
+#if !defined(NO_HARDWARE) && !defined(PVRSRV_GPUVIRT_GUESTDRV) && defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+ IMG_UINT64 ui32ID;
+ IMG_HANDLE hSysData;
+ PVRSRV_ERROR ePreErr = PVRSRV_OK, ePostErr = PVRSRV_OK;
+
+ hSysData = psDeviceNode->psDevConfig->hSysData;
+
+ /* Power-up the device as required to read the registers */
+ if(psDeviceNode->psDevConfig->pfnPrePowerState)
+ {
+ ePreErr = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+ if (PVRSRV_OK != ePreErr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: System Pre-Power up failed", __func__));
+ return ePreErr;
+ }
+ }
+
+ if(psDeviceNode->psDevConfig->pfnPostPowerState)
+ {
+ ePostErr = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+ if (PVRSRV_OK != ePostErr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: System Post Power up failed", __func__));
+ return ePostErr;
+ }
+ }
+
+ ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);
+
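+ /* Cores that expose the packed BVNC register report a non-zero B field;
+ * otherwise fall back to the separate CORE_ID/CORE_REVISION registers */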
+ if(GET_B(ui32ID))
+ {
+ B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
+ V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
+ N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
+ C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;
+
+ }
+ else
+ {
+ IMG_UINT64 ui32CoreID, ui32CoreRev;
+ ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
+ ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+ B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+ RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+ V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
+ RGX_CR_CORE_REVISION_MINOR_SHIFT;
+ N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
+ RGX_CR_CORE_ID_CONFIG_N_SHIFT;
+ C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
+ RGX_CR_CORE_ID_CONFIG_C_SHIFT;
+ }
+ PVR_LOG(("%s: Read BVNC %llu.%llu.%llu.%llu from device registers", __func__, B, V, N, C));
+
+ /* Power-down the device */
+ if(psDeviceNode->psDevConfig->pfnPrePowerState)
+ {
+ ePreErr = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+ if (PVRSRV_OK != ePreErr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: System Pre-Power down failed", __func__));
+ return ePreErr;
+ }
+ }
+
+ if(psDeviceNode->psDevConfig->pfnPostPowerState)
+ {
+ ePostErr = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+ if (PVRSRV_OK != ePostErr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: System Post Power down failed", __func__));
+ return ePostErr;
+ }
+ }
+#else
+#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_V) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C)
+ B = RGX_BVNC_KM_B;
+ N = RGX_BVNC_KM_N;
+ C = RGX_BVNC_KM_C;
+ {
+ IMG_UINT32 ui32ScanCount = 0;
+ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llu", &V);
+ if(1 != ui32ScanCount)
+ {
+ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llup", &V);
+ if(1 != ui32ScanCount)
+ {
+ V = 0;
+ }
+ }
+ }
+ PVR_LOG(("%s: Reverting to compile time BVNC %s", __func__, RGX_BVNC_KM));
+#else
+ PVR_LOG(("%s: Unable to determine the BVNC", __func__));
+#endif
+#endif
+ }
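+ /* The feature table below is keyed on B.N.C only, so pack the BVNC with V
+ * forced to zero; the ERN/BRN lookup further down uses the full B.V.N.C */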
+ ui64BVNC = BVNC_PACK(B,0,N,C);
+
+ /* Get the BVNC configuration */
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Detected BVNC INFO: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",__func__,
+ B,
+ V,
+ N,
+ C,
+ ui64BVNC));
+
+ /*Extract the information from the BVNC & ERN/BRN Table */
+ pui64Cfg = (IMG_UINT64 *)RGXSearchTable((IMG_UINT64 *)gaFeatures, sizeof(gaFeatures)/sizeof(gaFeatures[0]),
+ ui64BVNC,
+ sizeof(gaFeatures[0])/sizeof(IMG_UINT64));
+ if(pui64Cfg)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature Cfg: 0x%016llx 0x%016llx 0x%016llx\n",__func__,
+ pui64Cfg[0], pui64Cfg[1], pui64Cfg[2]));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Feature Lookup failed. Unsupported BVNC: 0x%016llx",
+ __func__, ui64BVNC));
+ return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+ }
+
+
+ psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
+ /* Parsing the feature config depends on the features available on the core,
+ * hence it must always follow the feature assignment above */
+ RGXParseBVNCFeatures(psDeviceNode, pui64Cfg[2]);
+
+ /* Get the ERN and BRN configuration */
+ ui64BVNC = BVNC_PACK(B,V,N,C);
+
+ pui64Cfg = (IMG_UINT64 *)RGXSearchTable((IMG_UINT64 *)gaErnsBrns, sizeof(gaErnsBrns)/sizeof(gaErnsBrns[0]),
+ ui64BVNC,
+ sizeof(gaErnsBrns[0])/sizeof(IMG_UINT64));
+ if(pui64Cfg)
+ {
+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016llx 0x%016llx \n",
+ __func__, *pui64Cfg, psDevInfo->sDevFeatureCfg.ui64ErnsBrns));
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN Lookup failed. Unsupported BVNC: 0x%016llx",
+ __func__, ui64BVNC));
+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
+ return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+ }
+
+ psDevInfo->sDevFeatureCfg.ui32B = (IMG_UINT32)B;
+ psDevInfo->sDevFeatureCfg.ui32V = (IMG_UINT32)V;
+ psDevInfo->sDevFeatureCfg.ui32N = (IMG_UINT32)N;
+ psDevInfo->sDevFeatureCfg.ui32C = (IMG_UINT32)C;
+
+ ui32RGXDevCnt++;
+#if defined(DEBUG)
+ RGXDumpParsedBVNCConfig(psDeviceNode);
+#endif
+ return PVRSRV_OK;
+}
+
+/*
+ * This function checks if a particular feature is available on the given RGX device */
+static IMG_BOOL RGXCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT64 ui64FeatureMask)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+ /* FIXME: need to implement a bounds check for passed feature mask */
+ if(psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+/*
+ * This function returns the value of a feature on the given RGX device */
+static IMG_INT32 RGXGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT64 ui64FeatureMask)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+ /*FIXME: need to implement a bounds check for passed feature mask */
+
+ switch(ui64FeatureMask)
+ {
+ case RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK:
+ return psDevInfo->sDevFeatureCfg.ui32PBW;
+ case RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK:
+ return psDevInfo->sDevFeatureCfg.ui32CacheLineSize;
+ default:
+ return -1;
+ }
+}
+
+/*
+ RGXRegisterDevice
+*/
+PVRSRV_ERROR RGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName);
+
+ if (psDeviceNode->psDevConfig->pszVersion)
+ {
+ PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion);
+ }
+
+ #if defined(RGX_FEATURE_SYSTEM_CACHE)
+ PDUMPCOMMENT("RGX System Level Cache is present");
+ #endif /* RGX_FEATURE_SYSTEM_CACHE */
+
+ PDUMPCOMMENT("RGX Initialisation (Part 1)");
+
+ /*********************
+ * Device node setup *
+ *********************/
+ /* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+ psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME;
+ /*
+ FIXME: This should not be required as PMR's should give the memspace
+ name. However, due to limitations within PDump we need a memspace name
+ when pdumping with MMU context with virtual address in which case we
+ don't have a PMR to get the name from.
+
+ There is also the issue obtaining a namespace name for the catbase which
+ is required when we PDump the write of the physical catbase into the FW
+ structure
+ */
+ psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+ psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+ OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+ OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+ /* Configure MMU specific stuff */
+ RGXMMUInit_Register(psDeviceNode);
+
+ psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+ psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+ /* Register RGX to receive notifies when other devices complete some work */
+ PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+ psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck;
+
+ /* Register callbacks for creation of device memory contexts */
+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+ /* Register callbacks for Unified Fence Objects */
+ psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+ psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+ /* Register callback for checking the device's health */
+ psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus;
+
+ /* Register method to service the FW HWPerf buffer */
+ psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+ /* Register callback for getting the device version information string */
+ psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+ /* Register callback for getting the device clock speed */
+ psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+ /* Register callback for soft resetting some device modules */
+ psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+ /* Register callback for resetting the HWR logs */
+ psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(RGXFW_ALIGNCHECKS)
+ /* Register callback for checking alignment of UM structures */
+ psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+#endif
+
+ /*Register callback for checking the supported features and getting the
+ * corresponding values */
+ psDeviceNode->pfnCheckDeviceFeature = RGXCheckFeatureSupported;
+ psDeviceNode->pfnGetDeviceFeatureValue = RGXGetSupportedFeatureValue;
+
+ /*Set up required support for dummy page */
+ OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+
+ /*Set the order to 0 */
+ psDeviceNode->sDummyPage.sDummyPageHandle.ui32Order = 0;
+
+ /*Set the size of the Dummy page to zero */
+ psDeviceNode->sDummyPage.ui32Log2DummyPgSize = 0;
+
+ /*Set the Dummy page phys addr */
+ psDeviceNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+ /* The lock type needs to be dispatch type here because it can be acquired from the MISR (Z-buffer) path */
+ eError = OSLockCreate(&psDeviceNode->sDummyPage.psDummyPgLock ,LOCK_TYPE_DISPATCH);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+ return eError;
+ }
+#if defined(PDUMP)
+ psDeviceNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+
+ /*********************
+ * Device info setup *
+ *********************/
+ /* Allocate device control block */
+ psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+ if (psDevInfo == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for DevInfo", __func__));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+
+ /* create locks for the context lists stored in the DevInfo structure.
+ * these lists are modified on context create/destroy and read by the
+ * watchdog thread
+ */
+
+ eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+ goto e0;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+ goto e1;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+ goto e2;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+ goto e3;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hRaytraceCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create raytrace context list lock", __func__));
+ goto e4;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+ goto e5;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+ goto e6;
+ }
+
+ dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+ dllist_init(&(psDevInfo->sRenderCtxtListHead));
+ dllist_init(&(psDevInfo->sComputeCtxtListHead));
+ dllist_init(&(psDevInfo->sTransferCtxtListHead));
+ dllist_init(&(psDevInfo->sTDMCtxtListHead));
+ dllist_init(&(psDevInfo->sRaytraceCtxtListHead));
+ dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+ dllist_init(&(psDevInfo->sCommonCtxtListHead));
+ psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+ dllist_init(&psDevInfo->sMemoryContextList);
+
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /* Allocate space for scripts. */
+ psDevInfo->psScripts = OSAllocMem(sizeof(*psDevInfo->psScripts));
+ if (!psDevInfo->psScripts)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate memory for scripts", __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e7;
+ }
+#endif
+
+ /* Setup static data and callbacks on the device specific device info */
+ psDevInfo->psDeviceNode = psDeviceNode;
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+ /*
+ * Map RGX Registers
+ */
+#if !defined(NO_HARDWARE)
+ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+ psDeviceNode->psDevConfig->ui32RegsSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+ if (psDevInfo->pvRegsBaseKM == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create RGX register mapping", __func__));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto e8;
+ }
+#endif
+
+ psDeviceNode->pvDevice = psDevInfo;
+
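+ /* Detect the core's BVNC and populate its feature and ERN/BRN configuration */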
+ eError = RGXGetBVNCConfig(psDeviceNode);
+ if(PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Unsupported Device detected by driver", __func__));
+ goto e9;
+ }
+
+ /* pdump info about the core */
+ PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d",
+ psDevInfo->sDevFeatureCfg.ui32B,
+ psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N,
+ psDevInfo->sDevFeatureCfg.ui32C);
+
+ eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo,
+ &psDeviceNode->sDummyPage.ui32Log2DummyPgSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e9;
+ }
+
+ eError = RGXHWPerfInit(psDeviceNode);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInit", e9);
+
+ /* Register callback for dumping debug info */
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+ psDeviceNode,
+ RGXDebugRequestNotify,
+ DEBUG_REQUEST_SYS,
+ psDevInfo);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ RGXMipsMMUInit_Register(psDeviceNode);
+ }
+
+ /* The device shared-virtual-memory heap address-space size is stored here for faster
+ look-up without having to walk the device heap configuration structures during
+ client device connection (i.e. this size is relative to a zero-based offset) */
+ if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & (FIX_HW_BRN_52402_BIT_MASK | FIX_HW_BRN_55091_BIT_MASK))
+ {
+ psDeviceNode->ui64GeneralSVMHeapSize = 0;
+ }else
+ {
+ psDeviceNode->ui64GeneralSVMHeapSize = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+ }
+
+ if(NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit)
+ {
+ psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig, \
+ psDevInfo->sDevFeatureCfg.ui64Features);
+ }
+
+ return PVRSRV_OK;
+
+e9:
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+e8:
+#endif /* !NO_HARDWARE */
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ OSFreeMem(psDevInfo->psScripts);
+e7:
+#endif
+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+e6:
+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+e5:
+ OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+e4:
+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+e3:
+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+e2:
+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+e1:
+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+e0:
+ OSFreeMem(psDevInfo);
+
+ /*Destroy the dummy page lock created above */
+ OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitGuestKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32DeviceFlags,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * Guest drivers do not support the following functionality:
+ * - Perform actual on-chip firmware loading, config & init
+ * - Perform actual on-chip firmware RDPowIsland(ing)
+ * - Perform actual on-chip firmware tracing, HWPerf
+ * - Configure firmware perf counters
+ */
+ eError = PVRSRVRGXInitAllocFWImgMemKM(psConnection,
+ psDeviceNode,
+ 0, /* uiFWCodeLen */
+ 0, /* uiFWDataLen */
+ 0, /* uiFWCorememLen */
+ NULL, /* ppsFWCodePMR */
+ NULL, /* psFWCodeDevVAddrBase */
+ NULL, /* ppsFWDataPMR */
+ NULL, /* psFWDataDevVAddrBase */
+ NULL, /* ppsFWCorememPMR */
+ NULL, /* psFWCorememDevVAddrBase */
+ NULL /* psFWCorememMetaVAddrBase */);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitGuest: PVRSRVRGXInitAllocFWImgMemKM failed (%u)", eError));
+ goto e0;
+ }
+
+ eError = PVRSRVRGXInitFirmwareKM(psConnection,
+ psDeviceNode,
+ NULL, /* psRGXFwInit */
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ 0, /* ui32HWPerfFWBufSizeKB */
+ 0, /* ui64HWPerfFilter */
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ 0, /* ui32ConfigFlags */
+ 0, /* ui32LogType */
+ 0, /* ui32FilterFlags */
+ 0, /* ui32JonesDisableMask */
+ 0, /* ui32HWRDebugDumpLimit */
+ psClientBVNC,
+ 0, /* ui32HWPerfCountersDataSize */
+ NULL, /* ppsHWPerfPMR */
+ 0, /* eRGXRDPowerIslandingConf */
+ 0 /* eFirmwarePerf */);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitGuest: PVRSRVRGXInitFirmwareKM failed (%u)", eError));
+ goto e0;
+ }
+
+ eError = PVRSRVRGXInitDevPart2KM(psConnection,
+ psDeviceNode,
+ NULL, /* psDbgScript */
+ ui32DeviceFlags,
+ 0, /* ui32HWPerfHostBufSizeKB */
+ 0, /* ui32HWPerfHostFilter */
+ 0, /* eActivePMConf */
+ NULL, /* psFWCodePMR */
+ NULL, /* psFWDataPMR */
+ NULL, /* psFWCorePMR */
+ NULL /* psHWPerfPMR */);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitGuest: PVRSRVRGXInitDevPart2KM failed (%u)", eError));
+ goto e0;
+ }
+e0:
+#else
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+ return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
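+ /* MIPS firmware: map the FW data section and inject the bootloader (ELF)
+ * parameters before the firmware can be started */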
+ void *pvFWImage;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWImage);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXInitFinaliseFWImageKM: Acquire mapping for FW data failed (%u)",
+ eError));
+ return eError;
+ }
+
+ eError = RGXBootldrDataInit(psDeviceNode, pvFWImage);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXInitFinaliseFWImageKM: ELF parameters injection failed (%u)",
+ eError));
+ return eError;
+ }
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+
+ }
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDevVersionString
+@Description Gets the version string for the given device node and returns
+ a pointer to it in ppszVersionString. It is then the
+ responsibility of the caller to free this memory.
+@Input psDeviceNode Device node from which to obtain the
+ version string
+@Output ppszVersionString Contains the version string upon return
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR **ppszVersionString)
+{
+#if defined(NO_HARDWARE) || defined(EMULATOR)
+ IMG_PCHAR pszFormatString = "Rogue Version: %s (SW)";
+#else
+ IMG_PCHAR pszFormatString = "Rogue Version: %s (HW)";
+#endif
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ size_t uiStringLength;
+
+ if (psDeviceNode == NULL || ppszVersionString == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
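+ /* Build the "B.V.N.C" string on first use and cache it in the device info */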
+ if(NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ IMG_CHAR pszBVNCInfo[MAX_BVNC_STRING_LEN];
+ size_t uiBVNCStringSize;
+
+ OSSNPrintf(pszBVNCInfo, MAX_BVNC_STRING_LEN, "%d.%d.%d.%d", \
+ psDevInfo->sDevFeatureCfg.ui32B, \
+ psDevInfo->sDevFeatureCfg.ui32V, \
+ psDevInfo->sDevFeatureCfg.ui32N, \
+ psDevInfo->sDevFeatureCfg.ui32C);
+
+ uiBVNCStringSize = (OSStringLength(pszBVNCInfo) + 1) * sizeof(IMG_CHAR);
+ psDevInfo->sDevFeatureCfg.pszBVNCString = OSAllocMem(uiBVNCStringSize);
+ if(NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Allocating memory for BVNC info string failed",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSCachedMemCopy(psDevInfo->sDevFeatureCfg.pszBVNCString,pszBVNCInfo,uiBVNCStringSize);
+ }
+
+ uiStringLength = OSStringLength(psDevInfo->sDevFeatureCfg.pszBVNCString) +
+ OSStringLength(pszFormatString);
+ *ppszVersionString = OSAllocZMem(uiStringLength);
+ if (*ppszVersionString == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSSNPrintf(*ppszVersionString, uiStringLength, pszFormatString,
+ psDevInfo->sDevFeatureCfg.pszBVNCString);
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function RGXDevClockSpeed
+@Description Gets the clock speed for the given device node and returns
+ it in pui32RGXClockSpeed.
+@Input psDeviceNode Device node
+@Output pui32RGXClockSpeed Variable for storing the clock speed
+@Return PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+ /* get clock speed */
+ *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxinit.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX initialisation header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXINIT_H__)
+#define __RGXINIT_H__
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxscript.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitDevPart2KM
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_INIT_COMMAND *psDbgScript,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSizeKB,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ RGX_ACTIVEPM_CONF eActivePMConf,
+ PMR *psFWCodePMR,
+ PMR *psFWDataPMR,
+ PMR *psFWCorememPMR,
+ PMR *psHWPerfPMR);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T ui32FWCodeLen,
+ IMG_DEVMEM_SIZE_T ui32FWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCorememLen,
+ PMR **ppsFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ PMR **ppsFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ PMR **ppsFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitMipsWrapperRegistersKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Remap1Config1Offset,
+ IMG_UINT32 ui32Remap1Config2Offset,
+ IMG_UINT32 ui32WrapperConfigOffset,
+ IMG_UINT32 ui32BootCodeOffset);
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVRGXInitGuestKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32DeviceFlags,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXPdumpBootldrDataInitKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32BootConfOffset,
+ IMG_UINT32 ui32ExceptionVectorsBaseAddress);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitFirmwareKM
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ FW_PERF_CONF eFirmwarePerf);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitFirmwareExtendedKM
+
+ @Description
+
+ Server-side RGX firmware initialisation, extends PVRSRVRGXInitFirmwareKM
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareExtendedKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ PMR **ppsHWPerfPMR,
+ RGX_FW_INIT_IN_PARAMS *psInParams);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitFinaliseFWImageKM
+
+ @Description
+
+ Perform final steps of FW code setup when necessary
+
+ @Input psDeviceNode - Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitHWPerfCountersKM
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+/*!
+*******************************************************************************
+
+ @Function DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXRegisterGpuUtilStats
+
+ @Description Initialise data used to compute GPU utilisation statistics
+ for a particular user (identified by the handle passed as
+ argument). This function must be called only once for each
+ different user/handle.
+
+ @Input phGpuUtilUser - Pointer to handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXUnregisterGpuUtilStats
+
+ @Description Free data previously used to compute GPU utilisation statistics
+ for a particular user (identified by the handle passed as
+ argument).
+
+ @Input hGpuUtilUser - Handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVGPUVIRTPopulateLMASubArenasKM
+
+ @Description Populates the LMA arenas based on the min max values passed by
+ the client during initialization. GPU Virtualisation Validation
+ only.
+
+ @Input psDeviceNode : Pointer to the device node
+ ui32NumElements : Total number of min / max values passed by
+ the client
+ aui32Elements : The array containing all the min / max values
+ passed by the client, all bundled together
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32NumElements,
+ IMG_UINT32 aui32Elements[],
+ IMG_BOOL bEnableTrustedDeviceAceConfig);
+
+#endif /* __RGXINIT_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxkicksync.c
+@Title Server side of the sync only kick API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxkicksync.h"
+
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "rgxfwutils.h"
+#include "allocmem.h"
+#include "sync.h"
+#include "rgxhwperf.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+struct _RGX_SERVER_KICKSYNC_CONTEXT_
+{
+ PVRSRV_DEVICE_NODE * psDeviceNode;
+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+ PVRSRV_CLIENT_SYNC_PRIM * psSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hJobId;
+};
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKickSyncContext)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Prepare cleanup struct */
+ * ppsKickSyncContext = NULL;
+ psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
+ if (psKickSyncContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psKickSyncContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ & psKickSyncContext->psSync,
+ "kick sync cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXCreateKickSyncContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ sInfo.psFWFrameworkMemDesc = NULL;
+ sInfo.psMCUFenceAddr = NULL;
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_KICKSYNC,
+ RGXFWIF_DM_GP,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_KICKSYNC_CCB_SIZE_LOG2,
+ 0, /* priority */
+ & sInfo,
+ & psKickSyncContext->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);
+
+ * ppsKickSyncContext = psKickSyncContext;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+fail_syncalloc:
+ OSFreeMem(psKickSyncContext);
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
+ psKickSyncContext->psServerCommonContext,
+ psKickSyncContext->psSync,
+ RGXFWIF_DM_3D,
+ PDUMP_FLAGS_NONE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+
+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+ dllist_remove_node(&(psKickSyncContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+ FWCommonContextFree(psKickSyncContext->psServerCommonContext);
+ SyncPrimFree(psKickSyncContext->psSync);
+
+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);
+
+ OSFreeMem(psKickSyncContext);
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext,
+
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** pasServerSyncs,
+
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 * pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+
+ IMG_UINT32 ui32ExtJobRef)
+{
+ RGXFWIF_KCCB_CMD sKickSyncKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 i;
+ PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress;
+ IMG_INT32 i32UpdateFenceFD = -1;
+ IMG_UINT32 ui32JobId;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ /* Android fd sync update info */
+ struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+
+ ui32JobId = OSAtomicIncrement(&psKickSyncContext->hJobId);
+
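+ /* Resolve the client fence and update sync prim blocks into FW addresses */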
+ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceOffset);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto fail_syncaddrlist;
+ }
+
+ pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateOffset);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto fail_syncaddrlist;
+ }
+
+ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+
+ /* Sanity check the server fences */
+ for (i = 0; i < ui32ServerSyncPrims; i++)
+ {
+ if (0 == (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on Kick Sync) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[31] = '\0';
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ /* Android FD fences: merge the check fence into the client fence list and add the timeline's update point to the client update list */
+ if (i32UpdateTimelineFD >= 0 && !pi32UpdateFenceFD)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+ eError =
+ pvr_sync_append_fences(szFenceName,
+ i32CheckFenceFD,
+ i32UpdateTimelineFD,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOAddress,
+ paui32ClientUpdateValue,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOAddress,
+ paui32ClientFenceValue,
+ &psFDFenceData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fdsync;
+ }
+ pvr_sync_get_updates(psFDFenceData, &ui32ClientUpdateCount,
+ &pauiClientUpdateUFOAddress, &paui32ClientUpdateValue);
+ pvr_sync_get_checks(psFDFenceData, &ui32ClientFenceCount,
+ &pauiClientFenceUFOAddress, &paui32ClientFenceValue);
+ }
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
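+ /* Build a NULL command (RGXFWIF_CCB_CMD_TYPE_NULL): no GPU workload is
+ * submitted, only the fence checks and sync updates are processed */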
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext),
+ ui32ClientFenceCount,
+ pauiClientFenceUFOAddress,
+ paui32ClientFenceValue,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOAddress,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_NULL,
+ ui32ExtJobRef,
+ ui32JobId,
+ PDUMP_FLAGS_NONE,
+ NULL,
+ "KickSync",
+ asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdinit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData), asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+ /*
+ * We should reserve space in the kernel CCB here and fill in the command
+ * directly.
+ * This is so that, if there isn't space in the kernel CCB, we can return
+ * with retry to the services client before we take any operations.
+ */
+
+ /*
+ * We might only be kicking to flush out a padding packet, so only submit
+ * the command if the create was successful.
+ */
+
+ /*
+ * All the required resources are ready at this point, we can't fail so
+ * take the required server sync operations and commit all the resources
+ */
+ RGXCmdHelperReleaseCmdCCB(1,
+ asCmdHelperData,
+ "KickSync",
+ FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
+
+ /* Construct the kernel kicksync CCB command. */
+ sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext));
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+ /*
+ * Submit the kicksync command to the firmware.
+ */
+ RGX_HWPERF_HOST_ENQ(psKickSyncContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_SYNC);
+
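+	/*
+	 * RGXScheduleCommand returns PVRSRV_ERROR_RETRY while there is no space
+	 * in the kernel CCB; keep retrying with a short wait until the command is
+	 * accepted or the overall hardware timeout expires.
+	 */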
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_3D,
+ & sKickSyncKCCBCmd,
+ sizeof(sKickSyncKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ PDUMP_FLAGS_NONE);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_SYNC);
+#endif
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
+ eError2));
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32UpdateTimelineFD >= 0)
+ {
+		/* If we get here, this should never fail; hitting a failure here
+		 * likely implies a coding error above */
+ i32UpdateFenceFD = pvr_sync_get_update_fd(psFDFenceData);
+ if (i32UpdateFenceFD < 0)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get and install the update sync fd",
+			        __FUNCTION__));
+			/* If we fail here, we cannot roll back the syncs, as the hardware
+			 * already holds references to resources they may be protecting in
+			 * the kick, so fall through */
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_free_append_data;
+ }
+ }
+
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+ pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+
+ *pi32UpdateFenceFD = i32UpdateFenceFD;
+
+ return PVRSRV_OK;
+
+fail_cmdaquire:
+fail_cmdinit:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+ pvr_sync_free_append_fences_data(psFDFenceData);
+fail_fdsync:
+#endif
+fail_syncaddrlist:
+ return eError;
+}
+
+
+/**************************************************************************//**
+ End of file (rgxkicksync.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxkicksync.h
+@Title Server side of the sync only kick API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXKICKSYNC_H__)
+#define __RGXKICKSYNC_H__
+
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "sync_server.h"
+
+
+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXCreateKickSyncContextKM
+@Description Server-side implementation of RGXCreateKicksyncContext
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);
+
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXDestroyKickSyncContextKM
+@Description Server-side implementation of RGXDestroyKicksyncContext
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXKickSyncKM
+@Description   Kicks a sync-only command
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
+
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** pasServerSyncs,
+
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 * pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+
+ IMG_UINT32 ui32ExtJobRef);
+
+#endif /* __RGXKICKSYNC_H__ */
+
+/**************************************************************************//**
+ End of file (rgxkicksync.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declaration of an interface layer used to abstract code that
+ can be compiled outside of the DDK, potentially in a
+ completely different OS.
+ All the headers included by this file must also be copied to
+ the alternative source tree.
+ All the functions declared here must have a DDK implementation
+ inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+ another different implementation in case they are used outside
+ of the DDK.
+ All of the functions accept as a first parameter a
+ "const void *hPrivate" argument. It should be used to pass
+ around any implementation specific data required.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_H__)
+#define __RGXLAYER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "rgx_bvnc_defs_km.h"
+#endif
+
+#include "rgx_firmware_processor.h"
+/* includes:
+ * rgx_meta.h and rgx_mips.h,
+ * rgxdefs_km.h,
+ * rgx_cr_defs_km.h (under SUPPORT_KERNEL_SRVINIT),
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+#include "rgx_fwif_shared.h"
+/* FIXME: required because of RGXFWIF_DEV_VIRTADDR, but this header
+ * pulls in a lot of other headers. RGXFWIF_DEV_VIRTADDR should be moved
+ * somewhere else (either img_types.h or a new header) */
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXMemCopy
+
+ @Description MemCopy implementation
+
+ @Input hPrivate : Implementation specific data
+ @Input pvDst : Pointer to the destination
+ @Input pvSrc : Pointer to the source location
+ @Input uiSize : The amount of memory to copy in bytes
+
+ @Return void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXMemCopy(const void *hPrivate,
+ void *pvDst,
+ void *pvSrc,
+ size_t uiSize);
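+
+/*
+ * Illustrative sketch (not part of the DDK sources): an implementation of
+ * this abstraction outside of the DDK could ignore hPrivate and map the call
+ * straight onto the local C library, for example:
+ *
+ *     void RGXMemCopy(const void *hPrivate, void *pvDst, void *pvSrc,
+ *                     size_t uiSize)
+ *     {
+ *         (void) hPrivate;
+ *         memcpy(pvDst, pvSrc, uiSize);
+ *     }
+ *
+ * The DDK implementation appears later in this patch (rgxlayer_impl.c).
+ */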
+
+/*!
+*******************************************************************************
+
+ @Function RGXMemSet
+
+ @Description MemSet implementation
+
+ @Input hPrivate : Implementation specific data
+ @Input pvDst : Pointer to the start of the memory region
+ @Input ui8Value : The value to be written
+ @Input uiSize : The number of bytes to be set to ui8Value
+
+ @Return void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXMemSet(const void *hPrivate,
+ void *pvDst,
+ IMG_UINT8 ui8Value,
+ size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function RGXCommentLogInit
+
+ @Description Generic log function used for debugging or other purposes
+
+ @Input hPrivate : Implementation specific data
+ @Input pszString : Message to be printed
+ @Input ... : Variadic arguments
+
+ @Return void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXCommentLogInit(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) __printf(2, 3);
+
+/*!
+*******************************************************************************
+
+ @Function RGXErrorLogInit
+
+ @Description Generic error log function used for debugging or other purposes
+
+ @Input hPrivate : Implementation specific data
+ @Input pszString : Message to be printed
+ @Input ... : Variadic arguments
+
+ @Return void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXErrorLogInit(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) __printf(2, 3);
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+/*!
+*******************************************************************************
+
+ @Function RGXDeviceHasFeatureInit
+
+ @Description Checks if a device has a particular feature
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64Feature : Feature to check
+
+ @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL RGXDeviceHasFeatureInit(const void *hPrivate, IMG_UINT64 ui64Feature);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetFWCorememSize
+
+ @Description Get the FW coremem size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return FW coremem size
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* !defined (__RGXLAYER_H__) */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include "pdump_km.h"
+#else
+#include <stdio.h>
+#include <stdarg.h>
+#include "pdump_um.h"
+#endif
+
+#include "rgxlayer_impl.h"
+#include "srvinit_osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "device.h"
+#include "rgxdevice.h"
+#endif
+
+#if defined(PDUMP)
+#include "client_pdump_bridge.h"
+#endif
+
+
+void RGXMemCopy(const void *hPrivate,
+ void *pvDst,
+ void *pvSrc,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ SRVINITDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+ void *pvDst,
+ IMG_UINT8 ui8Value,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ SRVINITDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLogInit(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...)
+{
+#if defined(PDUMP)
+ IMG_CHAR szBuffer[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ va_list argList;
+ SHARED_DEV_CONNECTION hServices;
+
+ PVR_ASSERT(hPrivate != NULL);
+ hServices = ((RGX_INIT_LAYER_PARAMS*)hPrivate)->hServices;
+
+ va_start(argList, pszString);
+ vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+ va_end(argList);
+
+ (void) BridgePVRSRVPDumpComment(hServices,
+ szBuffer,
+ PDUMP_FLAGS_CONTINUOUS);
+#else
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLogInit(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list argList;
+
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+ va_start(argList, pszString);
+ vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+ va_end(argList);
+
+ PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+IMG_BOOL RGXDeviceHasFeatureInit(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)(((RGX_INIT_LAYER_PARAMS *)hPrivate)->hServices);
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+#endif
+
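+/*
+ * With SUPPORT_KERNEL_SRVINIT the firmware coremem size is taken from the
+ * per-device feature configuration; otherwise it falls back to the build-time
+ * RGX_META_COREMEM_SIZE, or to zero when no coremem is present.
+ */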
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)(((RGX_INIT_LAYER_PARAMS *)hPrivate)->hServices);
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ return psDevInfo->sDevFeatureCfg.ui32MCMS;
+#elif defined(RGX_META_COREMEM_CODE) || defined(RGX_META_COREMEM_DATA)
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+ return RGX_META_COREMEM_SIZE;
+#else
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+ return 0;
+#endif
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_IMPL_H__)
+#define __RGXLAYER_IMPL_H__
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_INIT_LAYER_PARAMS_
+{
+ SHARED_DEV_CONNECTION hServices;
+} RGX_INIT_LAYER_PARAMS;
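+
+/*
+ * Illustrative usage sketch (an assumption, not taken verbatim from the DDK;
+ * hServices here stands for the caller's SHARED_DEV_CONNECTION): callers wrap
+ * their services connection in this structure and pass its address as the
+ * opaque "const void *hPrivate" handle expected by the rgxlayer functions:
+ *
+ *     RGX_INIT_LAYER_PARAMS sParams = { .hServices = hServices };
+ *
+ *     RGXCommentLogInit(&sParams, "FW coremem size: %u",
+ *                       RGXGetFWCorememSize(&sParams));
+ */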
+
+#endif /* !defined (__RGXLAYER_IMPL_H__) */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declaration of an interface layer used to abstract code that
+ can be compiled outside of the DDK, potentially in a
+ completely different OS.
+ All the headers included by this file must also be copied to
+ the alternative source tree.
+ All the functions declared here must have a DDK implementation
+ inside the DDK source tree (e.g. rgxlayer_km_impl.h/.c) and
+ another different implementation in case they are used outside
+ of the DDK.
+ All of the functions accept as a first parameter a
+ "const void *hPrivate" argument. It should be used to pass
+ around any implementation specific data required.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_KM_H__)
+#define __RGXLAYER_KM_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgx_firmware_processor.h"
+/* includes:
+ * rgx_meta.h and rgx_mips.h,
+ * rgxdefs_km.h,
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXWriteReg32/64
+
+ @Description Write a value to a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : New register value
+
+ @Return void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadReg32/64
+
+ @Description Read a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+
+ @Return Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXPollReg32/64
+
+ @Description Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : Value expected from the register
+ @Input ui32/64RegMask : Only the bits set in this mask will be
+ checked against uiRegValue
+
+ @Return PVRSRV_OK if the poll succeeds,
+ PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWaitCycles
+
+ @Description Wait for a number of GPU cycles and/or microseconds
+
+ @Input hPrivate : Implementation specific data
+ @Input          ui32Cycles   : Number of GPU cycles to wait for in PDumps;
+                                it can also be used when running driver-live
+                                if desired (ignoring the next parameter)
+ @Input ui32WaitUs : Number of microseconds to wait for when running
+ driver-live
+
+ @Return void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+ IMG_UINT32 ui32Cycles,
+ IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function RGXCommentLogPower
+
+ @Description This function is called with debug messages during
+ the RGX start/stop process
+
+ @Input hPrivate : Implementation specific data
+ @Input pszString : Message to be printed
+ @Input ... : Variadic arguments
+
+ @Return void
+
+******************************************************************************/
+void RGXCommentLogPower(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) __printf(2, 3);
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireKernelMMUPC
+
+ @Description Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input hPrivate : Implementation specific data
+ @Input psPCAddr : Returned page catalog address
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWriteKernelMMUPC32/64
+
+ @Description Write the Kernel MMU Page Catalogue to the 32/64 bit
+ RGX register passed as argument.
+ In a driver-live scenario without PDump these functions
+ are the same as RGXWriteReg32/64 and they don't need
+ to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32PCReg : Register offset inside the register bank
+ @Input ui32AlignShift : PC register alignshift
+ @Input ui32Shift : PC register shift
+ @Input ui32/64PCVal : Page catalog value (aligned and shifted)
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT64 ui64PCVal);
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT32 ui32PCVal);
+
+
+#else /* defined(PDUMP) */
+
+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \
+ RGXWriteReg64(priv, pcreg, pcval)
+
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+ RGXWriteReg32(priv, pcreg, pcval)
+
+#endif /* defined(PDUMP) */
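+
+/*
+ * Note: in the non-PDump variants above the align/shift parameters are
+ * deliberately unused; the page catalogue value passed in is already aligned
+ * and shifted by the caller, so a plain register write is sufficient.
+ */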
+
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireGPURegsAddr
+
+ @Description Acquire the GPU registers base device physical address
+
+ @Input hPrivate : Implementation specific data
+ @Input psGPURegsAddr : Returned GPU registers base address
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXMIPSWrapperConfig
+
+ @Description Write GPU register bank transaction ID and MIPS boot mode
+ to the MIPS wrapper config register (passed as argument).
+ In a driver-live scenario without PDump this is the same as
+ RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui64GPURegsAddr : GPU registers base address
+ @Input ui32GPURegsAlign : Register bank transactions alignment
+ @Input ui32BootMode : Mips BOOT ISA mode
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64GPURegsAddr,
+ IMG_UINT32 ui32GPURegsAlign,
+ IMG_UINT32 ui32BootMode);
+#else
+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \
+ RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireBootRemapAddr
+
+ @Description Acquire the device physical address of the MIPS bootloader
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psBootRemapAddr : Base address of the remapped bootloader
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXBootRemapConfig
+
+ @Description Configure the bootloader remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXBootRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireCodeRemapAddr
+
+ @Description Acquire the device physical address of the MIPS code
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psCodeRemapAddr : Base address of the remapped code
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXCodeRemapConfig
+
+ @Description Configure the code remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXCodeRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireDataRemapAddr
+
+ @Description Acquire the device physical address of the MIPS data
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psDataRemapAddr : Base address of the remapped data
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDataRemapConfig
+
+ @Description Configure the data remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXDataRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireTrampolineRemapAddr
+
+ @Description    Acquire the device physical address of the MIPS trampoline
+                 accessed through remap region
+
+ @Input          hPrivate : Implementation specific data
+ @Output         psTrampolineRemapAddr: Base address of the remapped trampoline
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXTrampolineRemapConfig
+
+ @Description Configure the trampoline remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
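+
+/*
+ * Unlike the boot/code/data remap helpers above, the trampoline remap has no
+ * PDump-specific implementation in this layer; it always expands to the two
+ * register writes.
+ */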
+
+/*!
+*******************************************************************************
+
+ @Function RGXDoFWSlaveBoot
+
+ @Description Returns whether or not a FW Slave Boot is required
+ while powering on
+
+ @Input hPrivate : Implementation specific data
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXIOCoherencyTest
+
+ @Description Performs a coherency test
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_OK if the test succeeds,
+ PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXIOCoherencyTest(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDeviceHasFeaturePower
+
+ @Description Checks if a device has a particular feature
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64Feature : Feature to check
+
+ @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasFeaturePower(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDeviceHasErnBrnPower
+
+ @Description    Checks if a device is affected by a particular erratum (ERN/BRN)
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64ErnsBrns : Flags to check
+
+ @Return         IMG_TRUE if the given ERN/BRN flag is set, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrnPower(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceSLCBanks
+
+ @Description Returns the number of SLC banks used by the device
+
+ @Input hPrivate : Implementation specific data
+
+ @Return Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceSLCSize
+
+ @Description Returns the device SLC size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return SLC size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceCacheLineSize
+
+ @Description Returns the device cache line size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* !defined (__RGXLAYER_KM_H__) */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined (PDUMP)
+#include <stdarg.h>
+#endif
+
+#include "rgxlayer_km_impl.h"
+#include "pdump_km.h"
+#include "devicemem_utils.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+
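+/*
+ * Register access helpers: when a PDump capture requests the NOHW flag, the
+ * real register access is skipped (reads return all-ones) while the access is
+ * still recorded in the PDump script.
+ */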
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if( !(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+#endif
+ {
+ OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+ }
+
+ PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psPowerParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if( !(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+#endif
+ {
+ OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+ }
+
+ PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psPowerParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+ IMG_UINT32 ui32RegValue;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+ {
+ ui32RegValue = IMG_UINT32_MAX;
+ }
+ else
+#endif
+ {
+ ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+ }
+
+ PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psPowerParams->ui32PdumpFlags);
+
+ return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+ IMG_UINT64 ui64RegValue;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+ {
+ ui64RegValue = IMG_UINT64_MAX;
+ }
+ else
+#endif
+ {
+ ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+ }
+
+ PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+ return ui64RegValue;
+}
+
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if( !(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+#endif
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr),
+ ui32RegValue,
+ ui32RegMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr,
+ ui32RegValue,
+ ui32RegMask,
+ psPowerParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return PVRSRV_OK;
+}
+
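+/*
+ * The 64-bit poll below is implemented as two independent 32-bit polls
+ * (upper word first, then lower word), so the two halves of the register are
+ * not sampled atomically.
+ */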
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvRegsBase;
+
+ /* Split lower and upper words */
+ IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+ IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+ IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+ IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if( !(psPowerParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+#endif
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr + 4),
+ ui32UpperValue,
+ ui32UpperMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr),
+ ui32LowerValue,
+ ui32LowerMask) != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr + 4,
+ ui32UpperValue,
+ ui32UpperMask,
+ psPowerParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr,
+ ui32LowerValue,
+ ui32LowerMask,
+ psPowerParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSWaitus(ui32TimeUs);
+ PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXCommentLogPower(const void *hPrivate, const IMG_CHAR *pszString, ...)
+{
+#if defined(PDUMP)
+ va_list argList;
+ va_start(argList, pszString);
+ PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+ va_end(argList);
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+#else
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psPCAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT64 ui64PCVal)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write the cat-base address */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal);
+
+ /* Pdump catbase address */
+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+ RGX_PDUMPREG_NAME,
+ ui32PCReg,
+ 8,
+ ui32PCRegAlignShift,
+ ui32PCRegShift,
+ PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT32 ui32PCVal)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write the cat-base address */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+ /* Pdump catbase address */
+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+ RGX_PDUMPREG_NAME,
+ ui32PCReg,
+ 4,
+ ui32PCRegAlignShift,
+ ui32PCRegShift,
+ PDUMP_FLAGS_CONTINUOUS);
+}
+
+#endif /* defined(PDUMP) */
+
+
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psGPURegsAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sGPURegAddr;
+}
+
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64GPURegsAddr,
+ IMG_UINT32 ui32GPURegsAlign,
+ IMG_UINT32 ui32BootMode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+ ui32RegAddr,
+ (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode);
+
+ /* Store register offset to temp PDump variable */
+ PDumpRegLabelToInternalVar(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+
+ /* Align register transactions identifier */
+ PDumpWriteVarSHRValueOp(":SYSMEM:$1", ui32GPURegsAlign, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Enable micromips instruction encoding */
+ PDumpWriteVarORValueOp(":SYSMEM:$1", ui32BootMode, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the actual register write */
+ PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0);
+}
+#endif
+
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psBootRemapAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sBootRemapAddr;
+}
+
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psCodeRemapAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr;
+}
+
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psDataRemapAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sDataRemapAddr;
+}
+
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psTrampolineRemapAddr = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr;
+}
+
+#if defined(PDUMP)
+static inline
+void RGXWriteRemapConfig2Reg(void *pvRegs,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64PhyAddr,
+ IMG_UINT64 ui64PhyMask,
+ IMG_UINT64 ui64Settings)
+{
+ OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings);
+
+ /* Store memory offset to temp PDump variable */
+ PDumpMemLabelToInternalVar(":SYSMEM:$1", psPMR, uiLogicalOffset, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Keep only the relevant bits of the output physical address */
+ PDumpWriteVarANDValueOp(":SYSMEM:$1", ui64PhyMask, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Extra settings for this remapped region */
+ PDumpWriteVarORValueOp(":SYSMEM:$1", ui64Settings, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the actual register write */
+ PDumpInternalVarToReg32(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXBootRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32BootRemapMemOffset = RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+
+void RGXCodeRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32CodeRemapMemOffset = RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+
+void RGXDataRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32DataRemapMemOffset = RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+#endif
+
+
+
+#define MAX_NUM_COHERENCY_TESTS (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+ {
+ return IMG_FALSE;
+ }
+
+ psDevConfig = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevConfig;
+
+ return PVRSRVSystemSnoopingOfCPUCache(psDevConfig);
+}
+
+PVRSRV_ERROR RGXIOCoherencyTest(const void *hPrivate)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ DEVMEM_MEMDESC *psIOCoherencyTestMemDesc;
+ IMG_UINT32 *pui32CpuVirtAddr;
+ RGXFWIF_DEV_VIRTADDR sCoherencyTestBuffer;
+ IMG_DEVMEM_SIZE_T uiCoherencyBlockSize = sizeof(IMG_UINT64);
+ IMG_DEVMEM_ALIGN_T uiCoherencyBlockAlign = sizeof(IMG_UINT64);
+ IMG_UINT32 ui32SLCCTRL;
+ IMG_UINT32 ui32TestNum;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_POWER_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Size and align are 'expanded' because we request an export align allocation */
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+ &uiCoherencyBlockSize,
+ &uiCoherencyBlockAlign);
+
+ /* Allocate, acquire cpu address and set firmware address */
+ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+ uiCoherencyBlockSize,
+ uiCoherencyBlockAlign,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwExIoCoherencyTestBuffer",
+ &psIOCoherencyTestMemDesc);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = DevmemAcquireCpuVirtAddr(psIOCoherencyTestMemDesc,
+ (void **) &pui32CpuVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Create a FW address which is uncached in the Meta DCache and in the SLC
+ * using the Meta bootloader segment.
+ * This segment is the only one configured correctly out of reset
+ * (when this test is meant to be executed).
+ */
+ {
+ RGXSetFirmwareAddress(&sCoherencyTestBuffer,
+ psIOCoherencyTestMemDesc,
+ 0,
+ RFW_FWADDR_FLAG_NONE);
+
+ /* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+ sCoherencyTestBuffer.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+ sCoherencyTestBuffer.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+ sCoherencyTestBuffer.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+ /* Map the buffer in the bootloader segment as uncached */
+ sCoherencyTestBuffer.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+ sCoherencyTestBuffer.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+ }
+
+ /* Bypass the SLC when IO coherency is enabled */
+ ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_SLC_CTRL_BYPASS,
+ ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN);
+
+ for (ui32TestNum = 1; ui32TestNum < 3; ui32TestNum++)
+ {
+ IMG_UINT32 i;
+ IMG_BOOL bPassed = IMG_TRUE;
+
+ PVR_LOG(("Startup I/O Coherency Test [pass #%u]", ui32TestNum));
+
+ for (i = 0; i < uiCoherencyBlockSize/sizeof(IMG_UINT32); i++)
+ {
+ IMG_UINT32 ui32FWAddr, ui32FWValue;
+ PVRSRV_ERROR eError2;
+
+ /* Ensures line is in dcache */
+ ui32FWValue = pui32CpuVirtAddr[i];
+
+ /* Dirty allocation in dcache */
+ pui32CpuVirtAddr[i] = i + ui32TestNum;
+
+ /* Flush possible cpu store-buffer(ing) */
+ OSWriteMemoryBarrier();
+
+ /* Read back value using RGX slave-port interface */
+ ui32FWAddr = sCoherencyTestBuffer.ui32Addr + (i * sizeof(IMG_UINT32));
+
+ eError2 = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s",
+ PVRSRVGetErrorStringKM(eError2)));
+ }
+
+ /* Compare to see if I/O coherency worked */
+ if (pui32CpuVirtAddr[i] != ui32FWValue)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Expected: %x, Got: %x",
+ pui32CpuVirtAddr[i], ui32FWValue));
+
+ bPassed = IMG_FALSE;
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+ PVR_LOG(("I/O Coherency Test [pass #%u] completed, Passed? %d",
+ ui32TestNum, bPassed));
+ }
+
+ /* Restore SLC bypass settings */
+ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL);
+
+ RGXUnsetFirmwareAddress(psIOCoherencyTestMemDesc);
+ DevmemReleaseCpuVirtAddr(psIOCoherencyTestMemDesc);
+ DevmemFwFree(psDevInfo, psIOCoherencyTestMemDesc);
+
+ if (eError == PVRSRV_OK)
+ {
+ PVR_LOG(("I/O Coherency Test succeeded"));
+ psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
+ }
+ else
+ {
+ PVR_LOG(("I/O Coherency Test FAILED"));
+ psDevInfo->ui32CoherencyTestsDone++;
+ }
+
+ return eError;
+}
+
+IMG_BOOL RGXDeviceHasFeaturePower(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+
+ return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_BOOL RGXDeviceHasErnBrnPower(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+
+ return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+
+ return psDevInfo->sDevFeatureCfg.ui32SLCBanks;
+}
+
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+
+ return psDevInfo->sDevFeatureCfg.ui32SLCSize;
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+ RGX_POWER_LAYER_PARAMS *psPowerParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psPowerParams = (RGX_POWER_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psPowerParams->psDevInfo;
+
+ return psDevInfo->sDevFeatureCfg.ui32CacheLineSize;
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_KM_IMPL_H__)
+#define __RGXLAYER_KM_IMPL_H__
+
+#include "rgxlayer_km.h"
+
+typedef struct _RGX_POWER_LAYER_PARAMS_
+{
+ void *psDevInfo;
+ void *psDevConfig;
+#if defined(PDUMP)
+ IMG_UINT32 ui32PdumpFlags;
+#endif
+
+ IMG_DEV_PHYADDR sPCAddr;
+ IMG_DEV_PHYADDR sGPURegAddr;
+ IMG_DEV_PHYADDR sBootRemapAddr;
+ IMG_DEV_PHYADDR sCodeRemapAddr;
+ IMG_DEV_PHYADDR sDataRemapAddr;
+ IMG_DEV_PHYADDR sTrampolineRemapAddr;
+} RGX_POWER_LAYER_PARAMS;
+
+#endif /* !defined (__RGXLAYER_KM_IMPL_H__) */
+
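
The accessor functions earlier in this patch (RGXAcquireBootRemapAddr() and friends) treat hPrivate as an opaque pointer to this structure and simply copy the requested field out. A hedged usage sketch of that round trip follows, assuming the relevant driver headers are in scope; the function, the address value, and the calling context are made up for illustration and are not part of the patch.

    /* Hypothetical example: a remap address round-trips through the handle. */
    static void example_power_layer_usage(PVRSRV_RGXDEV_INFO *psDevInfo)
    {
        RGX_POWER_LAYER_PARAMS sParams = { 0 };
        IMG_DEV_PHYADDR sBootRemap;

        sParams.psDevInfo = psDevInfo;
        sParams.sBootRemapAddr.uiAddr = IMG_UINT64_C(0x40000000); /* made-up address */

        RGXAcquireBootRemapAddr(&sParams, &sBootRemap);
        PVR_ASSERT(sBootRemap.uiAddr == sParams.sBootRemapAddr.uiAddr);
    }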
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX memory context management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX memory context management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgx_bvnc_defs_km.h"
+/*
+ FIXME:
+ For now just get global state, but what we really want is to do
+ this per memory context
+*/
+static IMG_UINT32 gui32CacheOpps = 0;
+/* FIXME: End */
+
+typedef struct _SERVER_MMU_CONTEXT_ {
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ MMU_CONTEXT *psMMUContext;
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ DLLIST_NODE sNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+} SERVER_MMU_CONTEXT;
+
+
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eMMULevel,
+ IMG_BOOL bUnmap)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(bUnmap);
+
+ switch (eMMULevel)
+ {
+ case MMU_LEVEL_3: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+ break;
+ case MMU_LEVEL_2: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+ break;
+ case MMU_LEVEL_1: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+ if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK))
+ {
+ gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+ }
+ break;
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+}
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt)
+{
+ PVRSRV_ERROR eError;
+
+ eError = RGXPreKickCacheCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ pui32MMUInvalidateUpdate,
+ bInterrupt);
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 *pui32MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_KCCB_CMD sFlushCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!gui32CacheOpps)
+ {
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ /* PVRSRVPowerLock guarantees atomicity between the commands submitted here and
+ * the consistency of the global flags variable. This matters when several
+ * applications are allocating resources concurrently. */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to acquire powerlock (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate;
+
+ /* Set up the command and add the device node's sync object */
+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+ sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = psDeviceNode->ui32NextMMUInvalidateUpdate;
+ SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim,
+ &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr);
+
+ /* Set the update value for the next kick */
+ psDeviceNode->ui32NextMMUInvalidateUpdate++;
+
+ /* Set which memory context this command is for (all ctxs for now) */
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+ {
+ gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL;
+ }
+ /* Indicate the firmware should signal command completion to the host */
+ if(bInterrupt)
+ {
+ gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
+ }
+#if 0
+ sFlushCmd.uCmdData.sMMUCacheData.psMemoryContext = ???
+#endif
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to transition RGX to ON (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = gui32CacheOpps;
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Submit MMU flush and invalidate (flags = 0x%08x)",
+ gui32CacheOpps);
+#endif
+
+ gui32CacheOpps = 0;
+
+ /* Schedule MMU cache command */
+ eError = RGXSendCommand(psDevInfo,
+ eDM,
+ &sFlushCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPreKickCacheCommand: Failed to schedule MMU "
+ "cache command to DM=%d with error (%u)", eDM, eError));
+ }
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+ return eError;
+}
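
The flow above is an accumulate-then-flush pattern: RGXMMUCacheInvalidate() only sets bits in gui32CacheOpps, and RGXPreKickCacheCommand() turns whatever has accumulated into a single MMUCACHE firmware command (with a sync update value) the next time work is kicked. A minimal standalone sketch of the pattern, using hypothetical names rather than driver API:

    #include <stdint.h>
    #include <stdbool.h>

    /* Stand-in for gui32CacheOpps: pending MMU cache maintenance flags. */
    static uint32_t pending_cache_ops;

    /* Called whenever a page table/directory/catalogue entry changes. */
    static void note_mmu_update(uint32_t level_flag)
    {
        pending_cache_ops |= level_flag;        /* cheap: no firmware command yet */
    }

    /* Called before kicking work: a single command covers all pending updates. */
    static bool flush_pending_ops(void (*send_mmucache_cmd)(uint32_t flags))
    {
        if (pending_cache_ops == 0)
            return false;

        send_mmucache_cmd(pending_cache_ops);
        pending_cache_ops = 0;
        return true;
    }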
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+/* Page fault debug is currently the only use case that needs to find process
+ * information after that process's device memory context has been destroyed.
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead = 0;
+
+/* Record a device memory context being unregistered.
+ * The list of unregistered contexts can be used to find the PID and process
+ * name belonging to a memory context which has since been destroyed.
+ */
+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+ gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+ & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+ psRecord->uiPID = psServerMMUContext->uiPID;
+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context"));
+ }
+ OSStringNCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+ psRecord->szProcessName[sizeof(psRecord->szProcessName) - 1] = '\0';
+
+ /* Fill the record before dropping the lock so that readers holding
+ * hMMUCtxUnregLock never observe a partially written entry */
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+}
+
+#endif
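
The history buffer above depends on UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE being a power of two: the head index can then wrap with a simple AND mask instead of a modulo, which is exactly what _RecordUnregisteredMemoryContext() does. A short standalone illustration of that idiom, with hypothetical names:

    #include <stdint.h>

    #define HISTORY_SIZE 8u   /* must be a power of two, like (1 << 3) above */

    static uint32_t history_head;

    /* Return the slot to overwrite and advance the head; the mask is
     * equivalent to "% HISTORY_SIZE" only because HISTORY_SIZE is a
     * power of two. */
    static uint32_t next_history_slot(void)
    {
        uint32_t slot = history_head;
        history_head = (history_head + 1) & (HISTORY_SIZE - 1);
        return slot;
    }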
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+ SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+ dllist_remove_node(&psServerMMUContext->sNode);
+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+#endif
+
+ /*
+ * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+ */
+ MMU_ReleaseBaseAddr(NULL /* FIXME */);
+
+ /*
+ * Free the firmware memory context.
+ */
+ DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+
+ OSFreeMem(psServerMMUContext);
+}
+
+
+/*
+ * RGXRegisterMemoryContext
+ */
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_FLAGS_T uiFWMemContextMemAllocFlags;
+ RGXFWIF_FWMEMCONTEXT *psFWMemContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ SERVER_MMU_CONTEXT *psServerMMUContext;
+
+ if (psDevInfo->psKernelMMUCtx == NULL)
+ {
+ /*
+ * This must be the creation of the Kernel memory context. Take a copy
+ * of the MMU context for use when programming the BIF.
+ */
+ psDevInfo->psKernelMMUCtx = psMMUContext;
+ }
+ else
+ {
+ psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+ if (psServerMMUContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_server_ctx;
+ }
+
+ psServerMMUContext->psDevInfo = psDevInfo;
+
+ /*
+ * This FW MemContext is only mapped into the kernel for initialisation purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine is
+ * sufficient on the CPU side (the WC buffer is flushed at every kick).
+ */
+ uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ /*
+ Allocate device memory for the firmware memory context for the new
+ application.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware memory context");
+ /* FIXME: why cache-consistent? */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWMemContext),
+ uiFWMemContextMemAllocFlags,
+ "FwMemoryContext",
+ &psFWMemContextMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
+ eError));
+ goto fail_alloc_fw_ctx;
+ }
+
+ /*
+ Temporarily map the firmware memory context to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+ (void **)&psFWMemContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
+ eError));
+ goto fail_acquire_cpu_addr;
+ }
+
+ /*
+ * Write the new memory context's page catalogue into the firmware memory
+ * context for the client.
+ */
+ eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_acquire_base_addr;
+ }
+
+ /*
+ * Set default values for the rest of the structure.
+ */
+ psFWMemContext->uiPageCatBaseRegID = -1;
+ psFWMemContext->uiBreakpointAddr = 0;
+ psFWMemContext->uiBPHandlerAddr = 0;
+ psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProt;
+
+ MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ psFWMemContext->ui32OSid = ui32OSidReg;
+ psFWMemContext->bOSidAxiProt = bOSidAxiProt;
+}
+#endif
+
+#if defined(PDUMP)
+ {
+ IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+ /*
+ * Dump the Mem context allocation
+ */
+ DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+
+
+ /*
+ * Obtain a symbolic addr of the mem context structure
+ */
+ eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
+ &uiOffset,
+ aszName,
+ PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_pdump_cat_base_addr;
+ }
+
+ /*
+ * Dump the Page Cat tag in the mem context (symbolic address)
+ */
+ eError = MMU_PDumpWritePageCatBase(psMMUContext,
+ aszName,
+ uiOffset,
+ 8, /* 64-bit register write */
+ 0,
+ 0,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_pdump_cat_base;
+ }
+ }
+#endif
+
+ /*
+ * Release kernel address acquired above.
+ */
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+ /*
+ * Store the process information for this device memory context
+ * for use with the host page-fault analysis.
+ */
+ psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
+ psServerMMUContext->psMMUContext = psMMUContext;
+ psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+ if (OSSNPrintf(psServerMMUContext->szProcessName,
+ RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
+ "%s",
+ OSGetCurrentClientProcessNameKM()) >= RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
+ {
+ psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
+ }
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)",
+ psServerMMUContext->szProcessName,
+ psServerMMUContext->uiPID,
+ psServerMMUContext->uiPID);
+
+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+ dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+ MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
+ *hPrivData = psServerMMUContext;
+ }
+
+ return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+ MMU_ReleaseBaseAddr(NULL);
+#endif
+fail_acquire_base_addr:
+ /* The CPU virtual address is already released before jumping here, so there is nothing extra to undo */
+fail_acquire_cpu_addr:
+ DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+ OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+ SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+ return psMMUContext->psFWMemContextMemDesc;
+}
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ IMG_DEV_PHYADDR sPCDevPAddr;
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psServerMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for memory context"));
+ continue;
+ }
+
+ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+ {
+ PVR_DUMPDEBUG_LOG("Found memory context (PID = %d, %s)",
+ psServerMMUContext->uiPID,
+ psServerMMUContext->szProcessName);
+
+ MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ break;
+ }
+ }
+
+ /* Lastly check for fault in the kernel allocated memory */
+ if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for kernel memory context"));
+ }
+
+ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+ {
+ MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* given the physical address of a page catalogue, searches for a corresponding
+ * MMU context and if found, provides the caller details of the process.
+ * Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+ RGXMEM_PROCESS_INFO *psInfo)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ DLLIST_NODE *psNode, *psNext;
+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+ /* check if the input PC addr corresponds to an active memory context */
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psThisMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for memory context"));
+ continue;
+ }
+
+ if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+ {
+ psServerMMUContext = psThisMMUContext;
+ break;
+ }
+ }
+
+ if(psServerMMUContext != NULL)
+ {
+ psInfo->uiPID = psServerMMUContext->uiPID;
+ OSStringNCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ /* else check if the input PC addr corresponds to the firmware */
+ else
+ {
+ IMG_DEV_PHYADDR sKernelPCDevPAddr;
+ PVRSRV_ERROR eError;
+
+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for kernel memory context"));
+ }
+ else
+ {
+ if(sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
+ {
+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+ OSStringNCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ }
+ }
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(bRet == IMG_FALSE)
+ {
+ /* no active memory context found with the given PC address.
+ * Check the list of most recently freed memory contexts.
+ */
+ IMG_UINT32 i;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ /* iterate through the list of unregistered memory contexts
+ * from newest (one before the head) to the oldest (the current head)
+ */
+ i = gui32UnregisteredMemCtxsHead;
+
+ do
+ {
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+ psRecord = &gasUnregisteredMemCtxs[i];
+
+ if(psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+ {
+ psInfo->uiPID = psRecord->uiPID;
+ OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_TRUE;
+ bRet = IMG_TRUE;
+ break;
+ }
+ } while(i != gui32UnregisteredMemCtxsHead);
+
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+ }
+#endif
+ return bRet;
+}
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+ RGXMEM_PROCESS_INFO *psInfo)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ DLLIST_NODE *psNode, *psNext;
+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+ /* check if the input PID corresponds to an active memory context */
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psThisMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+ if (psThisMMUContext->uiPID == uiPID)
+ {
+ psServerMMUContext = psThisMMUContext;
+ break;
+ }
+ }
+
+ if(psServerMMUContext != NULL)
+ {
+ psInfo->uiPID = psServerMMUContext->uiPID;
+ OSStringNCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ /* else check if the input PID corresponds to the firmware */
+ else if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+ OSStringNCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ /* if the PID didn't correspond to an active context or the
+ * FW address then see if it matches a recently unregistered context
+ */
+ if(bRet == IMG_FALSE)
+ {
+ IMG_UINT32 i;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ /* Walk the history from newest (one before the head) to oldest (the head),
+ * wrapping around the circular buffer exactly once */
+ i = gui32UnregisteredMemCtxsHead;
+
+ do
+ {
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+ psRecord = &gasUnregisteredMemCtxs[i];
+
+ if(psRecord->uiPID == uiPID)
+ {
+ psInfo->uiPID = psRecord->uiPID;
+ OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
+ psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+ psInfo->bUnregistered = IMG_TRUE;
+ bRet = IMG_TRUE;
+ break;
+ }
+ } while(i != gui32UnregisteredMemCtxsHead);
+
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+ }
+#endif
+ return bRet;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX memory context management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for RGX memory context management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXMEM_H__)
+#define __RGXMEM_H__
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 40
+
+/* this PID denotes the firmware */
+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext);
+
+/* FIXME: SyncPrim should be stored on the memory context */
+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXMMUSyncPrimFree(void);
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eMMULevel,
+ IMG_BOOL bUnmap);
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 *pui32MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+ RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+ RGXMEM_PROCESS_INFO *psInfo);
+
+#endif /* __RGXMEM_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmipsmmuinit.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+#include "rgxheapconfig.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+#include "rgx_mips.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+/* Position of the MIPS PT entry indicating entry validity */
+#define RGX_MIPS_MMUCTRL_PTE_PROTMASK (RGX_MIPS_MMUCTRL_PT_DATA_VALID_EN | \
+ RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_EN | \
+ RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_EN | \
+ ~RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_CLRMSK)
+/* Currently there is no page directory for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0
+/* Currently there is no page catalog for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ * Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ * Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+ the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+
+ /*
+ * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently
+ */
+ sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* No PC entries, the MIPS MMU has no Page Catalogue */
+ sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */
+
+ sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */
+ sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE; /* Alignment of PD AND PC */
+
+ sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */
+ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+ /*
+ * Setup sRGXMMUTopLevelDevVAddrConfig
+ */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0;
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0;
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = RGX_FIRMWARE_HEAP_SIZE >> sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift;
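+ /* For example, with a 4kB firmware page size (RGXMIPSFW_LOG2_PAGE_SIZE == 12)
+ * this yields RGX_FIRMWARE_HEAP_SIZE / 4096 page table entries, all held in the
+ * single-level MIPS page table (there is no PC or PD level). */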
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently
+ */
+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0;
+
+ /* No PD used for MIPS */
+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0);
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_4KBDP.
+ */
+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xffffffffc0);
+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGX_MIPS_MMUCTRL_PT_PFN_SHIFT;
+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_4KBDP
+ */
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0;
+
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = RGX_FIRMWARE_HEAP_SIZE >> sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift;
+
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+ /*
+ * Setup gsPageSizeConfig4KB
+ */
+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+ gsPageSizeConfig4KB.uiRefCount = 0;
+ gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_16KBDP
+ */
+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */
+
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet
+ */
+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */
+
+ sRGXMMUPTEConfig_16KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_16KBDP
+ */
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig16KB
+ */
+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+ gsPageSizeConfig16KB.uiRefCount = 0;
+ gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size. Not supported yet
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_64KBDP
+ */
+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_64KBDP.
+ *
+ */
+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_64KBDP.
+ */
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig64KB.
+ */
+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+ gsPageSizeConfig64KB.uiRefCount = 0;
+ gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size. Not supported yet
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_256KBDP
+ */
+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+ */
+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_256KBDP
+ */
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig256KB
+ */
+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+ gsPageSizeConfig256KB.uiRefCount = 0;
+ gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_1MBDP. Not supported yet
+ */
+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_1MBDP
+ */
+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_1MBDP
+ */
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig1MB
+ */
+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+ gsPageSizeConfig1MB.uiRefCount = 0;
+ gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet
+ */
+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_2MBDP
+ */
+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_2MBDP
+ */
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig2MB
+ */
+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+ gsPageSizeConfig2MB.uiRefCount = 0;
+ gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUDeviceAttributes
+ */
+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV;
+ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1;
+ /* The page table is contained in a single physical page whose size equals that of the page table itself */
+ sRGXMMUDeviceAttributes.ui32BaseAlign = RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE;
+ /* The base configuration is set to 4kB pages */
+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP;
+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+ /* Functions for deriving page table/dir/cat protection bits */
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+ on per-heap basis */
+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+ psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+ psDeviceNode->psFirmwareMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+ gsPageSizeConfig4KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+ gsPageSizeConfig4KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+ gsPageSizeConfig16KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+ gsPageSizeConfig16KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+ gsPageSizeConfig64KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+ gsPageSizeConfig64KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+ gsPageSizeConfig256KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+ gsPageSizeConfig256KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+ gsPageSizeConfig1MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+ gsPageSizeConfig1MB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+ gsPageSizeConfig2MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+ gsPageSizeConfig2MB.uiRefCount));
+#endif
+ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+ gsPageSizeConfig16KB.uiRefCount > 0 ||
+ gsPageSizeConfig64KB.uiRefCount > 0 ||
+ gsPageSizeConfig256KB.uiRefCount > 0 ||
+ gsPageSizeConfig1MB.uiRefCount > 0 ||
+ gsPageSizeConfig2MB.uiRefCount > 0
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXMipsMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt4
+@Description calculate the PCE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+
+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt8
+@Description calculate the PCE protection flags based on an 8 byte entry
+@Return IMG_UINT64 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt4
+@Description derive the PDE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8
+@Description derive the PDE protection flags based on an 8 byte entry
+
+@Input uiLog2DataPageSize The log2 of the required page size.
+ E.g., for 4KiB pages, this parameter must be 12.
+ For 2MiB pages, it must be set to 21.
+
+@Return IMG_UINT64 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt4
+@Description calculate the PTE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+ IMG_UINT32 ui32MMUFlags = 0;
+
+ if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+ {
+ /* read/write */
+ ui32MMUFlags |= RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_EN;
+ }
+ else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
+ {
+ /* read only */
+ }
+ else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+ {
+ /* write only */
+ ui32MMUFlags |= RGX_MIPS_MMUCTRL_PT_DATA_READ_INHIBIT_EN;
+ }
+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
+ }
+
+ /* cache coherency */
+ if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
+ }
+
+ /* cache setup */
+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+ {
+ ui32MMUFlags |= (RGX_MIPS_MMUCTRL_PT_UNCACHED_POLICY <<
+ RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_SHIFT);
+ }
+ else
+ {
+ ui32MMUFlags |= (RGX_MIPS_MMUCTRL_PT_CACHED_POLICY <<
+ RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_SHIFT);
+ }
+
+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+ {
+ ui32MMUFlags |= RGX_MIPS_MMUCTRL_PT_DATA_VALID_EN;
+ ui32MMUFlags |= RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_EN;
+ }
+
+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+ {
+ /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not supported on MIPS, option discarded")); */
+ }
+
+ return ui32MMUFlags;
+}
+
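+/* Illustrative sketch (not part of the driver logic): for a hypothetical
+ * read/write, cached, valid (and therefore global) mapping, i.e. uiProtFlags
+ * containing MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE |
+ * MMU_PROTFLAGS_CACHED and not MMU_PROTFLAGS_INVALID, RGXDerivePTEProt4()
+ * above would return
+ *   RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_EN                                          (0x00000004)
+ * | (RGX_MIPS_MMUCTRL_PT_CACHED_POLICY << RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_SHIFT) (0x00000018)
+ * | RGX_MIPS_MMUCTRL_PT_DATA_VALID_EN                                             (0x00000002)
+ * | RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_EN                                            (0x00000001)
+ * = 0x0000001F
+ * using the RGX_MIPS_MMUCTRL_* values defined elsewhere in this patch.
+ */
+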
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt8
+@Description calculate the PTE protection flags based on an 8 byte entry
+@Return IMG_UINT64 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
+
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXGetPageSizeConfigCB
+@Description Set up configuration for variable sized data pages.
+ RGXPutPageSizeConfigCB has to be called to ensure correct
+ refcounting.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGXMIPSFW_LOG2_PAGE_SIZE:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Refer caller's pointers to the data */
+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ /* Increment ref-count - not that we're allocating anything here
+ (I'm using static structs), but one day we might, so we want
+ the Get/Put code to be balanced properly */
+ psPageSizeConfig->uiRefCount ++;
+
+ /* This is purely for debug statistics */
+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+ psPageSizeConfig->uiRefCount);
+#endif
+
+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXPutPageSizeConfigCB
+@Description Tells this code that the MMU module is done with the
+ configurations set up in RGXGetPageSizeConfigCB. This can
+ be a no-op.
+ Called after RGXGetPageSizeConfigCB.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+ IMG_UINT32 uiLog2DataPageSize;
+
+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGXMIPSFW_LOG2_PAGE_SIZE:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Ref-count here is not especially useful, but it's an extra
+ check that the API is being used correctly */
+ psPageSizeConfig->uiRefCount --;
+#else
+ PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui64PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation for the MIPS firmware
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+ for the linkage between rgxinit.c and rgxmipsmmuinit.c, the former
+ being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMIPSMMUINIT_H_
+#define _SRVKM_RGXMIPSMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+/*
+ Labelling of fields within the virtual address. No PD or PC is currently
+ used for the MIPS MMU.
+*/
+/*
+Page Table entry #
+*/
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+
+
+/* PC entries related definitions */
+/* No PC is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U)
+
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U)
+
+/* PD entries related definitions */
+/* No PD is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U)
+
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U)
+
+
+/* PT entries related definitions */
+#define RGX_MIPS_MMUCTRL_PT_DATA_READ_INHIBIT_SHIFT (31U)
+#define RGX_MIPS_MMUCTRL_PT_DATA_READ_INHIBIT_CLRMSK (0X7FFFFFFF)
+#define RGX_MIPS_MMUCTRL_PT_DATA_READ_INHIBIT_EN (0X80000000)
+
+#define RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_SHIFT (2U)
+#define RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_CLRMSK (0XFFFFFFFB)
+#define RGX_MIPS_MMUCTRL_PT_DATA_WRITABLE_EN (0X00000004)
+
+#define RGX_MIPS_MMUCTRL_PT_DATA_VALID_SHIFT (1U)
+#define RGX_MIPS_MMUCTRL_PT_DATA_VALID_CLRMSK (0XFFFFFFFD)
+#define RGX_MIPS_MMUCTRL_PT_DATA_VALID_EN (0X00000002)
+
+#define RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_CLRMSK (0XFFFFFFFE)
+#define RGX_MIPS_MMUCTRL_PT_DATA_GLOBAL_EN (0X00000001)
+
+#define RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_SHIFT (3U)
+#define RGX_MIPS_MMUCTRL_PT_CACHE_POLICY_CLRMSK (0XFFFFFFC7)
+
+/* "Uncached" caching policy */
+#define RGX_MIPS_MMUCTRL_PT_UNCACHED_POLICY (0X00000007)
+/* "Write-back write-allocate" caching policy */
+#define RGX_MIPS_MMUCTRL_PT_CACHED_POLICY (0X00000003)
+
+/* Physical page number inside MIPS MMU entries */
+#define RGX_MIPS_MMUCTRL_PT_PFN_SHIFT (6U)
+#define RGX_MIPS_MMUCTRL_PT_PFN_CLRMSK (0XFC00003F)
+
+/* Flag bits inside MIPS MMU entries */
+#define RGX_MIPS_MMUCTRL_PT_FLAGS_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PT_FLAGS_CLRMSK (0XFFFFFFC0)
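+
+/* Worked example (illustrative only, with a hypothetical physical frame
+ * number of 0x12345): combined with the read/write, cached, valid, global
+ * flag set 0x1F derived by RGXDerivePTEProt4() in rgxmipsmmuinit.c, a
+ * complete MIPS PTE would be
+ *   (0x12345 << RGX_MIPS_MMUCTRL_PT_PFN_SHIFT) | 0x1F = 0x0048D15F
+ * i.e. the PFN occupies bits 25..6 and the flag bits the bottom six bits,
+ * as described by the CLRMSK values above.
+ */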
+
+
+IMG_EXPORT PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_EXPORT PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMIPSMMUINIT_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* Useful macros */
+/* Number of units (entries) representable in a bitfield, given its mask and shift */
+#define UNITS_IN_BITFIELD(Mask, Shift) (((Mask) >> (Shift)) + 1)
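+
+/* Worked example (for illustration only): with the 16kB data-page PT index
+ * configuration used further down in this file (mask 0x00001fc000, shift 14),
+ * UNITS_IN_BITFIELD(0x00001fc000, 14) evaluates to 0x7f + 1 = 128, i.e. 128
+ * page-table entries per 16kB-page heap. The degenerate 2MB case, whose PT
+ * index mask is 0, evaluates to (0 >> 21) + 1 = 1.
+ */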
+
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+ RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+ RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+ RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+ RGX_MMUCTRL_PT_DATA_CC_EN | \
+ RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+ RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+ ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+ RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+ RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ * Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ * Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+ the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* Setup of Px Entries:
+ *
+ *
+ * PAGE TABLE (8 Byte):
+ *
+ * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+ *
+ *
+ * PAGE DIRECTORY (8 Byte):
+ *
+ * | 40 | 39...5 (varies) | 4 | 3...1 | 0 |
+ * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
+ *
+ *
+ * PAGE CATALOGUE (4 Byte):
+ *
+ * | 31...4 | 3...2 | 1 | 0 |
+ * | Page Directory base address | (reserved) | Entry Pending | Valid |
+ *
+ */
+
+
+ /* Example how to get the PD address from a PC entry.
+ * The procedure is the same for PD and PT entries to retrieve PT and Page addresses:
+ *
+ * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
+ * | 31...4 | 3...2 | 1 | 0 |
+ * | PD Addr | 0 | 0 | 0 |
+ *
+ * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+ * | 27...0 |
+ * | PD Addr |
+ *
+ * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+ * | 39...0 |
+ * | PD Addr |
+ *
+ */
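+
+ /* As a numeric sketch (hypothetical PC entry value, not driver code): for a
+ * 32-bit PC entry ui32PCE = 0xABCDE011 (valid bit set), the PD physical
+ * address would be recovered as
+ * ((ui32PCE & uiAddrMask) >> uiAddrShift) << uiAddrLog2Align
+ * = ((0xABCDE011 & 0xfffffff0) >> 4) << 12
+ * = 0x0ABCDE01 << 12
+ * = 0xABCDE01000
+ */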
+
+
+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+
+ /*
+ * Setup sRGXMMUPCEConfig
+ */
+ sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */
+ sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+ sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */
+ sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */
+
+ sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/
+ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */
+
+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+ /*
+ * Setup sRGXMMUTopLevelDevVAddrConfig
+ */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_4KBDP
+ */
+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_4KBDP
+ */
+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the data pages, not of the PTs */
+
+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_4KBDP
+ */
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig4KB
+ */
+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+ gsPageSizeConfig4KB.uiRefCount = 0;
+ gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_16KBDP
+ */
+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
+
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_16KBDP
+ */
+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+ sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_16KBDP
+ */
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig16KB
+ */
+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+ gsPageSizeConfig16KB.uiRefCount = 0;
+ gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_64KBDP
+ */
+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_64KBDP
+ */
+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+ sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_64KBDP
+ */
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig64KB
+ */
+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+ gsPageSizeConfig64KB.uiRefCount = 0;
+ gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_256KBDP
+ */
+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_256KBDP
+ */
+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+ sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_256KBDP
+ */
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig256KB
+ */
+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+ gsPageSizeConfig256KB.uiRefCount = 0;
+ gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_1MBDP
+ */
+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 4;
+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 4;
+
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_1MBDP
+ */
+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+ sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_1MBDP
+ */
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig1MB
+ */
+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+ gsPageSizeConfig1MB.uiRefCount = 0;
+ gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_2MBDP
+ */
+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 4;
+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 4;
+
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_2MBDP
+ */
+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+ sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_2MBDP
+ */
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig2MB
+ */
+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+ gsPageSizeConfig2MB.uiRefCount = 0;
+ gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUDeviceAttributes
+ */
+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+ sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+ /* Functions for deriving page table/dir/cat protection bits */
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+ on per-heap basis */
+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+ psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+ psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+ gsPageSizeConfig4KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+ gsPageSizeConfig4KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+ gsPageSizeConfig16KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+ gsPageSizeConfig16KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+ gsPageSizeConfig64KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+ gsPageSizeConfig64KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+ gsPageSizeConfig256KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+ gsPageSizeConfig256KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+ gsPageSizeConfig1MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+ gsPageSizeConfig1MB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+ gsPageSizeConfig2MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+ gsPageSizeConfig2MB.uiRefCount));
+#endif
+ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+ gsPageSizeConfig16KB.uiRefCount > 0 ||
+ gsPageSizeConfig64KB.uiRefCount > 0 ||
+ gsPageSizeConfig256KB.uiRefCount > 0 ||
+ gsPageSizeConfig1MB.uiRefCount > 0 ||
+ gsPageSizeConfig2MB.uiRefCount > 0
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt4
+@Description calculate the PCE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+ return (uiProtFlags & MMU_PROTFLAGS_INVALID) ? 0 : RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt8
+@Description calculate the PCE protection flags based on an 8 byte entry
+@Return IMG_UINT64 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt4
+@Description derive the PDE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8
+@Description derive the PDE protection flags based on an 8 byte entry
+
+@Input uiLog2DataPageSize The log2 of the required page size.
+ E.g., for 4KiB pages, this parameter must be 12.
+ For 2MiB pages, it must be set to 21.
+
+@Return IMG_UINT64 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+ if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
+ {
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s:%d: in function <%s>: Invalid log2 data page size; expected one of {12, 14, 16, 18, 20, 21}, got %u",
+ __FILE__, __LINE__, __func__, uiLog2DataPageSize));
+ }
+ }
+ return ret_value;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt4
+@Description calculate the PTE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt8
+@Description calculate the PTE protection flags based on an 8 byte entry
+@Return IMG_UINT64 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT64 ui64MMUFlags = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+ {
+ /* read/write */
+ }
+ else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
+ {
+ /* read only */
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+ }
+ else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+ {
+ /* write only */
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
+ }
+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+ }
+
+ /* cache coherency */
+ if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+ }
+
+ /* cache setup */
+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+ }
+
+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+ }
+
+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+ }
+
+ return ui64MMUFlags;
+}
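+
+/* Illustrative sketch (not part of the driver logic): for a readable and
+ * writeable, GPU-cached, non-coherent, valid allocation carrying the
+ * PMMETA_PROTECT device flag, the function above reduces to
+ *   RGX_MMUCTRL_PT_DATA_VALID_EN | RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN
+ * (no READ_ONLY, CC or SLC_BYPASS_CTRL bits). The numeric values of these
+ * fields come from rgxmmudefs_km.h and are not repeated here.
+ */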
+
+
+/*************************************************************************/ /*!
+@Function RGXGetPageSizeConfigCB
+@Description Set up configuration for variable sized data pages.
+ RGXPutPageSizeConfigCB has to be called to ensure correct
+ refcounting.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Refer caller's pointers to the data */
+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ /* Increment ref-count - not that we're allocating anything here
+ (I'm using static structs), but one day we might, so we want
+ the Get/Put code to be balanced properly */
+ psPageSizeConfig->uiRefCount ++;
+
+ /* This is purely for debug statistics */
+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+ psPageSizeConfig->uiRefCount);
+#endif
+
+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXPutPageSizeConfigCB
+@Description Tells this code that the MMU module is done with the
+ configurations set up in RGXGetPageSizeConfigCB. This can
+ be a no-op.
+ Called after RGXGetPageSizeConfigCB.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+ IMG_UINT32 uiLog2DataPageSize;
+
+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Ref-count here is not especially useful, but it's an extra
+ check that the API is being used correctly */
+ psPageSizeConfig->uiRefCount --;
+#else
+ PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
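+ /* Extract the data page size field of the 8-byte page directory entry and map the hardware encoding back to a log2 page size */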
+ switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+ {
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+ *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+ *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+ *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+ *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+ *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+ *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+ break;
+ default:
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+ for the linkage between rgxinit.c and rgxmmuinit.c, the former
+ being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMMUINIT_H_
+#define _SRVKM_RGXMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxpdump.c
+@Title Device specific pdump routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific pdump functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+#include "rgx_bvnc_defs_km.h"
+
+/*
+ * There are two different sets of functions, one for META and one for MIPS,
+ * because the PDump player does not yet implement support for the MIPS MMU.
+ * So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual and have to
+ * use DevmemPDumpSaveToFile instead.
+ */
+static PVRSRV_ERROR _MetaDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ /* TA signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigTAChecksSize,
+ "out.tasig",
+ 0,
+ ui32PDumpFlags);
+
+ /* 3D signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+ 0,
+ psDevInfo->ui32Sig3DChecksSize,
+ "out.3dsig",
+ 0,
+ ui32PDumpFlags);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ /* RT signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRTChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigRTChecksSize,
+ "out.rtsig",
+ 0,
+ ui32PDumpFlags);
+ /* SH signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigSHChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigSHChecksSize,
+ "out.shsig",
+ 0,
+ ui32PDumpFlags);
+ }
+
+ return PVRSRV_OK;
+}
+static PVRSRV_ERROR _MetaDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32ThreadNum);
+ PVR_UNREFERENCED_PARAMETER(ui32OutFileOffset);
+#else
+ /* Dump trace buffers */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+ for(ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+ {
+ /*
+ * Some compilers cannot cope with the use of offsetof() below, because the
+ * array index is a non-const variable and they require the whole expression
+ * to be a compile-time constant. The typical compiler error produced is
+ * "expression must have a constant value", so the offset is computed by hand.
+ */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+ /* Trace pointer (ui32TracePointer) */
+ ui32Size = sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufThreadNumOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+
+ /* trace buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+ 0, /* 0 offset in the trace buffer mem desc */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+
+ /* assert info buffer */
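+ /* Assumed layout: two RGXFW_TRACE_BUFFER_ASSERT_SIZE character buffers (file path and assert message) followed by a 32-bit line number, hence the size below */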
+ ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */
+ + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */
+ + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+ }
+
+ /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */
+ PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+
+ /* Dump hwperf buffer */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0,
+ psDevInfo->ui32RGXFWIfHWPerfBufSize,
+ "out.hwperf",
+ 0,
+ ui32PDumpFlags);
+#endif
+
+ return PVRSRV_OK;
+
+}
+
+
+static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* TA signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigTAChecksSize,
+ "out.tasig",
+ 0);
+
+ /* 3D signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc,
+ 0,
+ psDevInfo->ui32Sig3DChecksSize,
+ "out.3dsig",
+ 0);
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ /* RT signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigRTChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigRTChecksSize,
+ "out.rtsig",
+ 0);
+
+ /* SH signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigSHChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigSHChecksSize,
+ "out.shsig",
+ 0);
+ }
+
+ return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#else
+
+ IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ /* Dump trace buffers */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+ for(ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+ {
+ /*
+ * Some compilers cannot cope with the use of offsetof() below, because the
+ * array index is a non-const variable and they require the whole expression
+ * to be a compile-time constant. The typical compiler error produced is
+ * "expression must have a constant value", so the offset is computed by hand.
+ */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+ /* Same again... */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf);
+
+ /* Trace pointer (ui32TracePointer) */
+ ui32Size = sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+
+ /* trace buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+ 0, /* 0 offset in the trace buffer mem desc */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+
+ /* assert info buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufOff + uiTraceBufSpaceAssertBufOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+ }
+
+ /* Dump hwperf buffer */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0,
+ psDevInfo->ui32RGXFWIfHWPerfBufSize,
+ "out.hwperf",
+ 0);
+#endif
+
+ return PVRSRV_OK;
+
+}
+
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ return _MipsDumpSignatureBufferKM(psConnection,
+ psDeviceNode,
+ ui32PDumpFlags);
+ }
+ else
+ {
+ return _MetaDumpSignatureBufferKM(psConnection,
+ psDeviceNode,
+ ui32PDumpFlags);
+ }
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+ }
+ else
+ {
+ return _MetaDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+ }
+}
+
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX pdump Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX pdump functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxdevice.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function PVRSRVPDumpSignatureBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPDumpTraceBufferKM
+
+ @Description
+
+ Dumps the firmware trace buffers and the HWPerf buffer
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSignatureBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpTraceBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+/******************************************************************************
+ End of file (rgxpdump.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxpdvfs.c
+@Title RGX Proactive DVFS Functionality
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel mode Proactive DVFS Functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxpdvfs.h"
+#include "rgxfwutils.h"
+
+#define USEC_TO_MSEC 1000
+
+PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint)
+{
+ RGXFWIF_KCCB_CMD sGPCCBCmd;
+ PVRSRV_ERROR eError;
+
+ if(psDevInfo->bPDVFSEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return PVRSRV_OK;
+ }
+
+ /* send feedback */
+ sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ;
+ sGPCCBCmd.uCmdData.sPDVFSMaxFreqData.ui32MaxOPPPoint = ui32MaxOPPPoint;
+
+
+ /* Submit command to the firmware. */
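+ /* Retry while RGXScheduleCommand returns PVRSRV_ERROR_RETRY, waiting briefly between attempts, until the MAX_HW_TIME_US budget is exhausted */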
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sGPCCBCmd,
+ sizeof(sGPCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return PVRSRV_OK;
+}
+
+
+void PDVFSRequestReactiveUpdate(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_KCCB_CMD sGPCCBCmd;
+ PVRSRV_ERROR eError;
+
+ if(psDevInfo->bPDVFSEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return;
+ }
+
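+ /* Only one reactive update is requested per frame: skip if no work has been flagged for this frame, and clear the flag below once the request has been queued */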
+ if(psDevInfo->psDeviceNode->psDevConfig->sDVFS.sPDVFSData.bWorkInFrame == IMG_FALSE)
+ {
+ return;
+ }
+
+ sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE;
+
+ /* Submit command to the firmware. */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sGPCCBCmd,
+ sizeof(sGPCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ psDevInfo->psDeviceNode->psDevConfig->sDVFS.sPDVFSData.bWorkInFrame = IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function PDVFSProcessCoreClkRateChange
+@Description Processes a core clock rate change request or notification.
+ If the SUPPORT_PDVFS_GPIO feature is enabled the firmware (PDVFS)
+ changes the core clock rate itself via GPIO, so this is treated
+ as a notification; otherwise it is treated as a request and the
+ system layer API is used to change the core clock rate.
+@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO.
+@Input ui32CoreClockRate New core clock rate.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDVFSProcessCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &psDevConfig->sDVFS.sDVFSDeviceCfg;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo;
+ PVRSRV_ERROR eError;
+
+#if !defined (SUPPORT_PDVFS_GPIO)
+ IMG_UINT32 ui32CoreClockRateCurrent = psRGXTimingInfo->ui32CoreClockSpeed;
+#endif
+ IMG_UINT32 ui32Index;
+ const IMG_OPP *psOpp = NULL;
+
+
+ if(psDevInfo->bPDVFSEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Core clock rate = %u\n", ui32CoreClockRate));
+
+ /**
+ * Find the matching OPP (Exact).
+ */
+ for (ui32Index = 0; ui32Index < psDVFSDeviceCfg->ui32OPPTableSize; ui32Index++)
+ {
+ if (ui32CoreClockRate == psDVFSDeviceCfg->pasOPPTable[ui32Index].ui32Freq)
+ {
+ psOpp = &psDVFSDeviceCfg->pasOPPTable[ui32Index];
+ break;
+ }
+ }
+
+ if (!psOpp)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Frequency not present in OPP table - %u", ui32CoreClockRate));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = PVRSRVDevicePreClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange failed"));
+ return eError;
+ }
+
+#if !defined (SUPPORT_PDVFS_GPIO)
+ /**
+ * Increasing frequency, change voltage first
+ */
+ if(ui32CoreClockRate > ui32CoreClockRateCurrent)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+ }
+
+ psDVFSDeviceCfg->pfnSetFrequency(ui32CoreClockRate);
+
+ /**
+ * Decreasing frequency, change frequency first
+ */
+ if (ui32CoreClockRate < ui32CoreClockRateCurrent)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+ }
+#endif
+
+ psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate;
+
+ PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL);
+
+ return PVRSRV_OK;
+}
+
+#if defined (RGXFW_META_SUPPORT_2ND_THREAD)
+/*************************************************************************/ /*!
+@Function RGXPDVFSCheckCoreClkRateChange
+@Description Checks if core clock rate has changed since the last snap-shot.
+@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO.
+@Return None.
+*/ /**************************************************************************/
+void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32CoreClkRate = *(psDevInfo->pui32RGXFWIFCoreClkRate);
+
+ if(psDevInfo->bPDVFSEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return;
+ }
+
+ if ((ui32CoreClkRate != 0) &&
+ (psDevInfo->ui32CoreClkRateSnapshot != ui32CoreClkRate))
+ {
+ psDevInfo->ui32CoreClkRateSnapshot = ui32CoreClkRate;
+ PDVFSProcessCoreClkRateChange(psDevInfo, ui32CoreClkRate);
+ }
+}
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxpdvfs.h
+@Title RGX Proactive DVFS Functionality
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the kernel mode Proactive DVFS Functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXPDVFS_H
+#define RGXPDVFS_H
+#include "img_types.h"
+#include "rgxdevice.h"
+#define PDVFS_REACTIVE_INTERVAL_MS 16
+
+
+IMG_INTERNAL
+PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint);
+
+void PDVFSRequestReactiveUpdate(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR PDVFSProcessCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate);
+
+#if defined (RGXFW_META_SUPPORT_2ND_THREAD)
+IMG_INTERNAL
+void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+#endif /* RGXPDVFS_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific power routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "sync.h"
+#include "lists.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sCmd;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+ /* Send the Timeout notification to the FW */
+ /* Extending the APM Latency Change command structure with the notification boolean for
+ backwards compatibility reasons */
+ sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+ sCmd.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = psRuntimeCfg->ui32ActivePMLatencyms;
+ sCmd.uCmdData.sPowData.bNotifyTimeout = IMG_TRUE;
+
+ /* Ensure the new APM latency is written to memory before requesting the FW to read it */
+ OSMemoryBarrier();
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sCmd,
+ sizeof(sCmd),
+ PDUMP_FLAGS_NONE);
+
+ return eError;
+}
+
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+ IMG_UINT64 *paui64StatsCounters;
+ IMG_UINT64 ui64LastPeriod;
+ IMG_UINT64 ui64LastState;
+ IMG_UINT64 ui64LastTime;
+ IMG_UINT64 ui64TimeNow;
+
+ psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+ OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+
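+ /* ui64LastWord packs the timestamp of the previous update together with the GPU state at that time; unpack it, credit the elapsed period to that state's counter, then repack it with the current time */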
+ /* Update counters to account for the time since the last update */
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+ paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+ /* Update state and time of the latest update */
+ psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+ OSLockRelease(psDevInfo->hGPUUtilLock);
+}
+
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+ if (psDevConfig->pfnTDRGXStop == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ eError = RGXStop(&psDevInfo->sPowerParams);
+#endif
+
+ return eError;
+}
+
+/*
+ RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Send the Power off request to the FW */
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.bForced = bForced;
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to send Power off request"));
+ return eError;
+ }
+
+ /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies
+ on the EventObject which is signalled in this MISR */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+ /* Check the Power state after the answer */
+ if (eError == PVRSRV_OK)
+ {
+ /* Finally, de-initialise some registers. */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32TID;
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ /* Wait for the pending META/MIPS to host interrupts to come back. */
+ eError = PVRSRVPollForValueKM(&psDevInfo->aui32SampleIRQCount[ui32TID],
+ psFWTraceBuf->aui32InterruptCount[ui32TID],
+ 0xffffffff);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, \
+ "RGXPrePowerState: Wait for pending interrupts failed. Thread %u: Host:%u, FW: %u", \
+ ui32TID, \
+ psDevInfo->aui32SampleIRQCount[ui32TID], \
+ psFWTraceBuf->aui32InterruptCount[ui32TID]));
+
+ RGX_WaitForInterruptsTimeout(psDevInfo);
+ break;
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ /* Update GPU frequency and timer correlation related data */
+ RGXGPUFreqCalibratePrePowerOff(psDeviceNode);
+
+ /* Update GPU state counters */
+ _RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(PVR_DVFS)
+ eError = SuspendDVFS();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to suspend DVFS"));
+ return eError;
+ }
+#endif
+
+ psDevInfo->bRGXPowered = IMG_FALSE;
+
+ eError = RGXDoStop(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: RGXDoStop failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+ }
+ }
+ else
+ {
+ /* The sync was updated but the power state isn't OFF, so the FW denied the transition */
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+ if (bForced)
+ { /* It is an error for a forced request to be denied */
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failure to power off during a forced power off. FW: %d", psFWTraceBuf->ePowState));
+ }
+ }
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ /* timeout waiting for the FW to ack the request: return timeout */
+ PVR_DPF((PVR_DBG_WARNING,"RGXPrePowerState: Timeout waiting for powoff ack from the FW"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Error waiting for powoff ack from the FW (%s)", PVRSRVGetErrorStringKM(eError)));
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+ }
+
+ }
+
+ return eError;
+}
+
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+ if (psDevConfig->pfnTDRGXStart == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ eError = RGXStart(&psDevInfo->sPowerParams);
+#endif
+
+ return eError;
+}
+
+/*
+ RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_INIT *psRGXFWInit;
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /* Update GPU frequency and timer correlation related data */
+ RGXGPUFreqCalibratePostPowerOn(psDeviceNode);
+
+ /* Update GPU state counters */
+ _RGXUpdateGPUUtilStats(psDevInfo);
+
+ eError = RGXDoStart(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: RGXDoStart failed"));
+ return eError;
+ }
+
+ OSMemoryBarrier();
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+ eError = ValidateFWImageWithSP(psDevInfo);
+ if (eError != PVRSRV_OK) return eError;
+#endif
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPostPowerState: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ return eError;
+ }
+
+ /*
+ * Check whether the FW has started by polling on bFirmwareStarted flag
+ */
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)&psRGXFWInit->bFirmwareStarted,
+ IMG_TRUE,
+ 0xFFFFFFFF) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+ eError = PVRSRV_ERROR_TIMEOUT;
+
+ /*
+ * When the bFirmwareStarted poll fails, some information may be gained by
+ * performing the debug dump below, but unfortunately it could lock up some
+ * cores or cause other power lock issues. The code is kept here as a
+ * possible approach for when all other ideas have been tried.
+ */
+ /*{
+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+ if (psPowerDev)
+ {
+ PVRSRV_DEV_POWER_STATE eOldPowerState = psPowerDev->eCurrentPowerState;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ psPowerDev->eCurrentPowerState = PVRSRV_DEV_POWER_STATE_ON;
+ RGXDumpDebugInfo(NULL, psDeviceNode->pvDevice);
+ psPowerDev->eCurrentPowerState = eOldPowerState;
+ PVRSRVPowerLock(psDeviceNode);
+ }
+ }*/
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, bFirmwareStarted),
+ IMG_TRUE,
+ 0xFFFFFFFFU,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPostPowerState: problem pdumping POL for psRGXFWIfInitMemDesc (%d)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ return eError;
+ }
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ SetFirmwareStartTime(psRGXFWInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+ HTBSyncPartitionMarker(psRGXFWInit->ui32MarkerVal);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+ psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(PVR_DVFS)
+ eError = ResumeDVFS();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to resume DVFS"));
+ return eError;
+ }
+#endif
+ }
+ }
+
+ PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RGXPreClockSpeedChange: RGX clock speed was %uHz",
+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+ && (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+ {
+ /* Update GPU frequency and timer correlation related data */
+ RGXGPUFreqCalibratePreClockSpeedChange(psDeviceNode);
+ }
+
+ return eError;
+}
+
+
+/*
+ RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ /* Update runtime configuration with the new value */
+ psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed;
+
+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+ && (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+ {
+ RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd;
+
+ RGXGPUFreqCalibratePostClockSpeedChange(psDeviceNode, ui32NewClockSpeed);
+
+ sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+ sCOREClkSpeedChangeCmd.uCmdData.sCORECLKSPEEDCHANGEData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+ /* Ensure the new clock speed is written to memory before requesting the FW to read it */
+ OSMemoryBarrier();
+
+ PDUMPCOMMENT("Scheduling CORE clock speed change command");
+
+ PDUMPPOWCMDSTART();
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sCOREClkSpeedChangeCmd,
+ sizeof(sCOREClkSpeedChangeCmd),
+ PDUMP_FLAGS_NONE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling CORE clock speed change command failed");
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+ }
+
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function RGXDustCountChange
+
+ @Description
+
+ Changes the number of DUSTs.
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32NumberOfDusts)
+{
+
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sDustCountChange;
+ IMG_UINT32 ui32MaxAvailableDusts = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters/2));
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
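+ /* One dust corresponds to a pair of clusters, hence the divide by two above; the count is clamped to a minimum of one dust */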
+ if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDustCountChange: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+ ui32NumberOfDusts,
+ ui32MaxAvailableDusts,
+ eError));
+ return eError;
+ }
+
+ #if defined(FIX_HW_BRN_59042)
+ if (ui32NumberOfDusts < ui32MaxAvailableDusts && (ui32NumberOfDusts & 0x1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDustCountChange: Invalid number of DUSTs (%u) due to HW restriction. Allowed values are :-",
+ ui32NumberOfDusts));
+ switch (ui32MaxAvailableDusts)
+ {
+ case 2: PVR_DPF((PVR_DBG_ERROR, "0, 2")); break;
+ case 3: PVR_DPF((PVR_DBG_ERROR, "0, 2, 3")); break;
+ case 4: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4")); break;
+ case 5: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 5")); break;
+ case 6: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 6")); break;
+ default: break;
+ }
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ #endif
+
+ psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts;
+
+ #if !defined(NO_HARDWARE)
+ {
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+ {
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+ PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Attempt to change dust count when not IDLE"));
+ return eError;
+ }
+ }
+ #endif
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+
+ sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+ sDustCountChange.uCmdData.sPowData.uPoweReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+ PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts);
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sDustCountChange,
+ sizeof(sDustCountChange),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError);
+ PVR_DPF((PVR_DBG_ERROR, "RGXDustCountChange: Scheduling KCCB to change Dust Count failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for the firmware to answer. */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Timeout waiting for idle request"));
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+ return PVRSRV_OK;
+}
+/*
+ @Function RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ActivePMLatencyms,
+ IMG_BOOL bActivePMLatencyPersistant)
+{
+
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXAPMLatencyChange: Failed to acquire power lock"));
+ return eError;
+ }
+
+ /* Update runtime configuration with the new values */
+ psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+ psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+ {
+ RGXFWIF_KCCB_CMD sActivePMLatencyChange;
+ sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sActivePMLatencyChange.uCmdData.sPowData.bNotifyTimeout = IMG_FALSE;
+ sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+ sActivePMLatencyChange.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+
+ /* Ensure the new APM latency is written to memory before requesting the FW to read it */
+ OSMemoryBarrier();
+
+ PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms);
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sActivePMLatencyChange,
+ sizeof(sActivePMLatencyChange),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError);
+ PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+ /* Release the power lock acquired above before returning the error */
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ OSAcquireBridgeLock();
+ /* NOTE: If this function were ever to wait on an event object, care
+ should be taken not to release the bridge lock while sleeping; the
+ bridge lock must be held for the duration of the sleep. */
+
+ /* Powerlock to avoid further requests from racing with the FW hand-shake from now on
+ (previous kicks to this point are detected by the FW) */
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ goto _RGXActivePowerRequest_PowerLock_failed;
+ }
+
+ /* Check again for IDLE once we have the power lock */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+ {
+
+ psDevInfo->ui32ActivePMReqTotal++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFWTraceBuf->ui64StartIdleTime);
+#endif
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ IMG_FALSE); /* forced */
+ PDUMPPOWCMDEND();
+
+ if (eError == PVRSRV_OK)
+ {
+ psDevInfo->ui32ActivePMReqOk++;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+ {
+ psDevInfo->ui32ActivePMReqDenied++;
+ }
+
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+_RGXActivePowerRequest_PowerLock_failed:
+ OSReleaseBridgeLock();
+
+ return eError;
+
+}
+/*
+ RGXForcedIdleRequest
+*/
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32RetryCount = 0;
+
+#if !defined(NO_HARDWARE)
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Firmware already forced idle */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_FORCED_IDLE)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+ return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+ }
+#endif
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.bCancelForcedIdle = IMG_FALSE;
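+ /* The same FORCED_IDLE_REQ command type is reused by RGXCancelForcedIdleRequest with bCancelForcedIdle set to IMG_TRUE */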
+
+ PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command");
+
+ /* Send one forced IDLE command to GP */
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to send idle request"));
+ return eError;
+ }
+
+ /* Wait for GPU to finish current workload */
+ do {
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+ if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+ {
+ break;
+ }
+ ui32RetryCount++;
+ PVR_DPF((PVR_DBG_WARNING,"RGXForcedIdleRequest: Request timeout. Retry %d of %d", ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+ } while (IMG_TRUE);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXFWNotifyHostTimeout(psDevInfo);
+ PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Idle request failed. Firmware potentially left in forced idle state"));
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+#if !defined(NO_HARDWARE)
+ /* Check the firmware state for idleness */
+ if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to force IDLE"));
+
+ return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ goto ErrorExit;
+ }
+
+ /* Send the IDLE request to the FW */
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.bCancelForcedIdle = IMG_TRUE;
+
+ PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command");
+
+ /* Send cancel forced IDLE command to GP */
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP);
+ goto ErrorExit;
+ }
+
+ /* Wait for the firmware to answer. */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 1, 0xFFFFFFFF);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Timeout waiting for cancel idle request"));
+ goto ErrorExit;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+ return eError;
+
+ErrorExit:
+ PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Firmware potentially left in forced idle state"));
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RGXGetNextDustCount
+
+ @Description
+
+ Calculate a sequence of dust counts to achieve full transition coverage.
+ We increment two dust counts and switch up and down between them.
+ The sequence contains a few redundant transitions. If two dusts exist,
+ the output transitions are as follows.
+
+ 0->1, 0<-1, 0->2, 0<-2, (0->1)
+ 1->1, 1->2, 1<-2, (1->2)
+ 2->2, (2->0),
+ 0->0. Repeat.
+
+ Redundant transitions in brackets.
+
+ @Input psDustReqState : Counter state used to calculate next dust count
+ @Input ui32DustCount : Number of dusts in the core
+
+ @Return IMG_UINT32 : Next dust count
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
+{
+ if (psDustReqState->bToggle)
+ {
+ psDustReqState->ui32DustCount2++;
+ }
+
+ if (psDustReqState->ui32DustCount2 > ui32DustCount)
+ {
+ psDustReqState->ui32DustCount1++;
+ psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
+ }
+
+ if (psDustReqState->ui32DustCount1 > ui32DustCount)
+ {
+ psDustReqState->ui32DustCount1 = 0;
+ psDustReqState->ui32DustCount2 = 0;
+ }
+
+ psDustReqState->bToggle = !psDustReqState->bToggle;
+
+ return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
+}
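+
+#if 0
+/*
+ * Illustrative sketch only (not built): driving the dust count sequence.
+ * Assuming RGX_DUST_STATE starts zero-initialised, ui32DustCount == 2 yields
+ * the sequence 0, 1, 0, 2, 0, 1, 1, 2, 1, 2, 2, 0, 0, ... which covers the
+ * transitions listed in the function header above. The helper name below is
+ * hypothetical and the returned errors are ignored for brevity.
+ */
+static void _ExampleDustCountSequence(IMG_HANDLE hDevHandle, IMG_UINT32 ui32DustCount)
+{
+	RGX_DUST_STATE sDustState = {0};
+	IMG_UINT32 i;
+
+	for (i = 0; i < 16; i++)
+	{
+		/* Request the next dust count in the coverage sequence */
+		IMG_UINT32 ui32NextDusts = RGXGetNextDustCount(&sDustState, ui32DustCount);
+
+		(void) RGXDustCountChange(hDevHandle, ui32NextDusts);
+	}
+}
+#endif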
+
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX power header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX power
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXPOWER_H__)
+#define __RGXPOWER_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function RGXPrePowerState
+
+ @Description
+
+ Does necessary preparation before a power state transition.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+ @Input bForced : Whether the power state change is forced
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+/*!
+******************************************************************************
+
+ @Function RGXPostPowerState
+
+ @Description
+
+ Does necessary processing after a power state transition.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+ @Input bForced : Whether the power state change is forced
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXPreClockSpeedChange
+
+ @Description
+
+ Does processing required before an RGX clock speed change.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function RGXPostClockSpeedChange
+
+ @Description
+
+ Does processing required after an RGX clock speed change.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXDustCountChange
+
+ @Description Change of number of DUSTs
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function RGXAPMLatencyChange
+
+ @Description
+
+ Changes the wait duration used before firmware indicates IDLE.
+ Reducing this value will cause the firmware to shut off faster and
+ more often but may increase bubbles in GPU scheduling due to the added
+ power management activity. If bPersistent is NOT set, the APM latency
+ reverts to the system default on power up.
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input bPersistent : Set to ensure new value is not reset
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ActivePMLatencyms,
+ IMG_BOOL bActivePMLatencyPersistant);
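+
+/*
+ * Illustrative usage sketch (an assumption, not part of the documented API):
+ * request a 5ms APM latency that reverts to the system default on the next
+ * power up:
+ *
+ *	eError = RGXAPMLatencyChange(hDevHandle, 5, IMG_FALSE);
+ */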
+
+/*!
+******************************************************************************
+
+ @Function RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
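+
+/*
+ * Illustrative usage sketch (an assumption based on the descriptions above,
+ * not a documented calling convention): a caller needing the GPU idle would
+ * typically pair the two requests:
+ *
+ *	eError = RGXForcedIdleRequest(hDevHandle, IMG_FALSE);
+ *	if (eError == PVRSRV_OK)
+ *	{
+ *		(do the work that requires an idle GPU)
+ *		eError = RGXCancelForcedIdleRequest(hDevHandle);
+ *	}
+ */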
+
+/*!
+******************************************************************************
+
+ @Function RGXGetNextDustCount
+
+ @Description
+
+ Calculate a sequence of dust counts to achieve full transition coverage.
+ We increment two dust counts and switch up and down between them.
+ The sequence contains a few redundant transitions. If two dusts exist,
+ the output transitions are as follows.
+
+ 0->1, 0<-1, 0->2, 0<-2, (0->1)
+ 1->1, 1->2, 1<-2, (1->2)
+ 2->2, (2->0),
+ 0->0. Repeat.
+
+ Redundant transitions in brackets.
+
+ @Input psDustReqState : Counter state used to calculate next dust count
+ @Input ui32DustCount : Number of dusts in the core
+
+ @Return IMG_UINT32 : Next dust count
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount);
+
+
+#endif /* __RGXPOWER_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX ray tracing routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX ray tracing routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#include <stddef.h>
+#if defined(INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxray.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+
+/*
+ * FIXME: Defs copied from "rgxrpmdefs.h"
+ */
+
+typedef struct _RGX_RPM_DATA_RTU_FREE_PAGE_LIST {
+ IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_FREE_PAGE_LIST;
+
+/*
+Page table index.
+ The field is a pointer to a free page
+*/
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_WOFF (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_SHIFT (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK ) | (((_x_) & (0x003fffff)) << 0)))
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(_ft_) (((_ft_).u32_0 >> (0)) & 0x003fffff)
+
+typedef struct _RGX_RPM_DATA_RTU_PAGE_TABLE {
+ IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_PAGE_TABLE;
+
+/*
+ Page Table State
+ <br> 00: Empty Block
+ <br> 01: Full Block
+ <br> 10: Fragmented Block: Partially full page
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_SHIFT (30U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK (0X3FFFFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_PTS(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK ) | (((_x_) & (0x00000003)) << 30)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_PTS(_ft_) (((_ft_).u32_0 >> (30)) & 0x00000003)
+/*
+ Primitives in Page.
+ Number of unique primitives stored in this page.
+ The memory manager will re-use this page when the RCNT drops to zero.
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_SHIFT (22U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK (0XC03FFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_RCNT(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK ) | (((_x_) & (0x000000ff)) << 22)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_RCNT(_ft_) (((_ft_).u32_0 >> (22)) & 0x000000ff)
+/*
+Next page table index.
+ The field is a pointer to the next page for this primitive.
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_SHIFT (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_NPTI(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK ) | (((_x_) & (0x003fffff)) << 0)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_NPTI(_ft_) (((_ft_).u32_0 >> (0)) & 0x003fffff)
+
+
+#define RGX_CR_RPM_PAGE_TABLE_BASE_VALUE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U)
+
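+/*
+ * Illustrative sketch only (not built): the accessor macros above pack a
+ * 22-bit page table index (PTI) into the low bits of the 32-bit entry word.
+ * The helper name below is hypothetical.
+ */
+#if 0
+static void _ExampleFreePageListEntry(void)
+{
+	RGX_RPM_DATA_RTU_FREE_PAGE_LIST sEntry;
+	IMG_UINT32 ui32PTI;
+
+	sEntry.u32_0 = 0;
+	RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(sEntry, 0x1234); /* store index */
+	ui32PTI = RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(sEntry); /* reads back 0x1234 */
+	PVR_ASSERT(ui32PTI == 0x1234);
+}
+#endif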
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+#if 0
+ /* FIXME - multiple frame contexts? */
+ RGX_RPM_FREELIST *psSHFFreeList;
+ RGX_RPM_FREELIST *psSHGFreeList;
+#endif
+} RGX_SERVER_RAY_SH_DATA;
+
+
+typedef enum {
+ NODE_EMPTY = 0,
+ NODE_SCENE_HIERARCHY,
+ NODE_RPM_PAGE_TABLE,
+ NODE_RPM_FREE_PAGE_LIST
+} RGX_DEVMEM_NODE_TYPE;
+
+typedef struct _RGX_DEVMEM_NODE_ {
+ RGX_DEVMEM_NODE_TYPE eNodeType; /*!< Alloc type */
+ PMR *psPMR; /*!< Scene hierarchy/page table/free page list phys pages */
+ DEVMEMINT_HEAP *psDevMemHeap; /*!< Heap where the virtual mapping is made */
+ IMG_DEV_VIRTADDR sAddr; /*!< GPU virtual address where the phys pages are mapped into */
+ IMG_UINT32 ui32NumPhysPages; /*!< Number of physical pages mapped in for this node */
+ IMG_UINT32 ui32StartOfMappingIndex; /*!< Start of mapping index (i.e. OS page offset from virtual base) */
+ IMG_BOOL bInternal;
+} RGX_DEVMEM_NODE;
+
+typedef struct _RGX_RPM_DEVMEM_DESC_ {
+ DLLIST_NODE sMemoryDescBlock; /*!< the scene hierarchy memory block */
+ RGX_RPM_FREELIST *psFreeList; /*!< Free list this allocation is associated with */
+ IMG_UINT32 ui32NumPages; /*!< Number of RPM pages added */
+ RGX_DEVMEM_NODE sSceneHierarchyNode; /*!< scene hierarchy block descriptor */
+ RGX_DEVMEM_NODE sRPMPageListNode; /*!< RPM page list block descriptor */
+ RGX_DEVMEM_NODE sRPMFreeListNode; /*!< RPM free list block descriptor */
+} RGX_RPM_DEVMEM_DESC;
+
+typedef struct _DEVMEM_RPM_FREELIST_LOOKUP_
+{
+ IMG_UINT32 ui32FreeListID;
+ RGX_RPM_FREELIST *psFreeList;
+} DEVMEM_RPM_FREELIST_LOOKUP;
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+ RGX_CLIENT_CCB *psFCClientCCB[DPX_MAX_RAY_CONTEXTS];
+ DEVMEM_MEMDESC *psFCClientCCBMemDesc[DPX_MAX_RAY_CONTEXTS];
+ DEVMEM_MEMDESC *psFCClientCCBCtrlMemDesc[DPX_MAX_RAY_CONTEXTS];
+} RGX_SERVER_RAY_RS_DATA;
+
+
+struct _RGX_SERVER_RAY_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRayContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ RGX_SERVER_RAY_SH_DATA sSHData;
+ RGX_SERVER_RAY_RS_DATA sRSData;
+ IMG_UINT32 ui32CleanupStatus;
+#define RAY_CLEANUP_SH_COMPLETE (1 << 0)
+#define RAY_CLEANUP_RS_COMPLETE (1 << 1)
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hJobId;
+};
+
+
+#if 0
+static
+#ifdef __GNUC__
+ __attribute__((noreturn))
+#endif
+void sleep_for_ever(void)
+{
+#if defined(__KLOCWORK__) // klocworks would report an infinite loop because of while(1).
+ PVR_ASSERT(0);
+#else
+ while(1)
+ {
+ OSSleepms(~0); // sleep the maximum amount of time possible
+ }
+#endif
+}
+#endif
+
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ IMG_UINT32 ui32NumPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ PMR **ppsPMR);
+
+static PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _CreateSHContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RAY_SH_DATA *psSHData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_VRDMCTX_STATE *psContextState;
+ PVRSRV_ERROR eError;
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware SHG context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_VRDMCTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRaySHGContextSuspendState",
+ &psSHData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_shcontextsuspendalloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSHData->psContextStateMemDesc,
+ (void **)&psContextState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to map firmware render context state (%u)",
+ eError));
+ goto fail_suspendcpuvirtacquire;
+ }
+ psContextState->uVRDMReg_VRM_CALL_STACK_POINTER = sVRMCallStackAddr.uiAddr;
+ DevmemReleaseCpuVirtAddr(psSHData->psContextStateMemDesc);
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_SH,
+ RGXFWIF_DM_SHG,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ psSHData->psContextStateMemDesc,
+ RGX_RTU_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psSHData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init TA fw common context (%u)",
+ eError));
+ goto fail_shcommoncontext;
+ }
+
+ /*
+ * Dump the FW SH context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the SH context suspend state buffer");
+ DevmemPDumpLoadMem(psSHData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_VRDMCTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psSHData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_shcommoncontext:
+fail_suspendcpuvirtacquire:
+ DevmemFwFree(psDevInfo, psSHData->psContextStateMemDesc);
+fail_shcontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+static
+PVRSRV_ERROR _CreateRSContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RAY_RS_DATA *psRSData)
+{
+ PVRSRV_ERROR eError;
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_RS,
+ RGXFWIF_DM_RTU,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_RTU_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psRSData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init 3D fw common context (%u)",
+ eError));
+ goto fail_rscommoncontext;
+ }
+
+ psRSData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_rscommoncontext:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*
+ Static functions used by ray context code
+*/
+
+static
+PVRSRV_ERROR _DestroySHContext(RGX_SERVER_RAY_SH_DATA *psSHData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psSHData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_SHG,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+ FWCommonContextFree(psSHData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, psSHData->psContextStateMemDesc);
+ psSHData->psContextStateMemDesc = NULL;
+ psSHData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _DestroyRSContext(RGX_SERVER_RAY_RS_DATA *psRSData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psRSData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_RTU,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+
+
+ FWCommonContextFree(psRSData->psServerCommonContext);
+ psRSData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+
+/*
+ * RPM driver management rev 2
+ *
+ * The RPM freelists are opaque to the client driver. Scene Hierarchy pages
+ * are managed in Blocks (analogous to PB blocks) which are alloc'd in KM
+ * and mapped into the client MMU context.
+ *
+ * Page tables are set up for each existing Scene Memory Block.
+ *
+ * Freelist entries are also updated according to the list of Scene Memory Blocks.
+ *
+ * NOTES:
+ *
+ * (1) Scene Hierarchy shrink is not expected to be used.
+ * (2) The RPM FreeLists are Circular buffers and must be contiguous in virtual space
+ * (3) Each PMR is created with no phys backing pages. Pages are mapped in on-demand
+ * via RGXGrowRPMFreeList.
+ *
+ */
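+/*
+ * Illustrative sketch only (not built): on-demand growth mirrors the call
+ * made from RGXProcessRequestRPMGrow() later in this file; the grow size and
+ * block list come from the freelist itself. The helper name is hypothetical
+ * and error handling is reduced to a log message.
+ */
+#if 0
+static void _ExampleGrowRPMFreeListOnDemand(RGX_RPM_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+
+	eError = RGXGrowRPMFreeList(psFreeList,
+	                            psFreeList->ui32GrowFLPages,
+	                            &psFreeList->sMemoryBlockHead);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Example grow of RPM freelist failed (%u)", eError));
+	}
+}
+#endif
+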
+#if defined(DEBUG)
+static PVRSRV_ERROR _ReadRPMFreePageList(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiIdx, j;
+ size_t uNumBytesCopied;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST *psFreeListBuffer;
+ IMG_UINT32 ui32PTI[4];
+
+ /* Allocate scratch area for setting up Page table indices */
+ psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+ if (psFreeListBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_WriteRPMPageList: failed to allocate scratch page table"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Read scratch buffer from PMR (FPL entries must be contiguous) */
+ eError = PMR_ReadBytes(psPMR,
+ uiLogicalOffset,
+ (IMG_UINT8 *) psFreeListBuffer,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ &uNumBytesCopied);
+
+ if (eError == PVRSRV_OK)
+ {
+ for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx +=4)
+ {
+ for (j=0; j<4; j++)
+ {
+ ui32PTI[j] = RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(psFreeListBuffer[uiIdx + j]);
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "%4d: %7d %7d %7d %7d", uiIdx,
+ ui32PTI[0], ui32PTI[1], ui32PTI[2], ui32PTI[3]));
+ }
+ }
+
+ /* Free scratch buffer */
+ OSFreeMem(psFreeListBuffer);
+
+ return eError;
+}
+
+static IMG_BOOL RGXDumpRPMFreeListPageList(RGX_RPM_FREELIST *psFreeList)
+{
+ PVR_LOG(("RPM Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016llx",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32FreelistID,
+ psFreeList->ui64FreelistChecksum));
+
+ /* Dump FreeList page list */
+ _ReadRPMFreePageList(psFreeList->psFreeListPMR, 0, psFreeList->ui32CurrentFLPages);
+
+ return IMG_TRUE;
+}
+#endif
+
+static PVRSRV_ERROR _UpdateFwRPMFreelistSize(RGX_RPM_FREELIST *psFreeList,
+ IMG_BOOL bGrow,
+ IMG_BOOL bRestartRPM,
+ IMG_UINT32 ui32DeltaSize)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sGPCCBCmd;
+
+ if (!bGrow)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: RPM freelist shrink not supported."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* send feedback */
+ sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+ sGPCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32DeltaSize;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewSize =
+ ((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+ psFreeList->ui32CurrentFLPages;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: RPM freelist [FWAddr=0x%08x] has 0x%08x pages",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32CurrentFLPages));
+
+ /* Submit command to the firmware. */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psFreeList->psDevInfo,
+ RGXFWIF_DM_GP,
+ &sGPCCBCmd,
+ sizeof(sGPCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if 0
+static void _CheckRPMFreelist(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumOfPagesToCheck,
+ IMG_UINT64 ui64ExpectedCheckSum,
+ IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+ /* No checksum needed as we have all information in the pdumps */
+ PVR_UNREFERENCED_PARAMETER(psFreeList);
+ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+ *pui64CalculatedCheckSum = 0;
+#else
+ PVRSRV_ERROR eError;
+ size_t uiNumBytes;
+ IMG_UINT8* pui8Buffer;
+ IMG_UINT32* pui32Buffer;
+ IMG_UINT32 ui32CheckSumAdd = 0;
+ IMG_UINT32 ui32CheckSumXor = 0;
+ IMG_UINT32 ui32Entry;
+ IMG_UINT32 ui32Entry2;
+ IMG_BOOL bFreelistBad = IMG_FALSE;
+
+ *pui64CalculatedCheckSum = 0;
+
+ /* Allocate Buffer of the size of the freelist */
+ pui8Buffer = OSAllocMem(psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ if (pui8Buffer == NULL)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ /* Copy freelist content into Buffer */
+ eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+ psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32),
+ pui8Buffer,
+ psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32),
+ &uiNumBytes);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(pui8Buffer);
+ PVR_LOG(("_CheckRPMFreelist: Failed to get freelist data for RPM freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ PVR_ASSERT(uiNumBytes == psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ PVR_ASSERT(ui32NumOfPagesToCheck <= psFreeList->ui32CurrentFLPages);
+
+ /* Generate checksum */
+ pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+ for(ui32Entry = 0; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+ {
+ ui32CheckSumAdd += pui32Buffer[ui32Entry];
+ ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+ /* Check for double entries */
+ for (ui32Entry2 = 0; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+ {
+ if ((ui32Entry != ui32Entry2) &&
+ (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]))
+ {
+ PVR_LOG(("_CheckRPMFreelist: RPM Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ pui32Buffer[ui32Entry2],
+ ui32Entry,
+ ui32Entry2,
+ psFreeList->ui32CurrentFLPages));
+ bFreelistBad = IMG_TRUE;
+ }
+ }
+ }
+
+ OSFreeMem(pui8Buffer);
+
+ /* Check the calculated checksum against the expected checksum... */
+ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Checksum mismatch for RPM freelist %p! Expected 0x%016llx calculated 0x%016llx",
+ psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+ bFreelistBad = IMG_TRUE;
+ }
+
+ if (bFreelistBad)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Sleeping for ever!"));
+ sleep_for_ever();
+// PVR_ASSERT(!bFreelistBad);
+ }
+#endif
+}
+#endif
+
+static PVRSRV_ERROR _WriteRPMFreePageList(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NextPageIndex,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiIdx;
+ size_t uNumBytesCopied;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST *psFreeListBuffer;
+
+ /* Allocate scratch area for setting up Page table indices */
+ psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+ if (psFreeListBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_WriteRPMPageList: failed to allocate scratch page table"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx ++, ui32NextPageIndex ++)
+ {
+ psFreeListBuffer[uiIdx].u32_0 = 0;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(psFreeListBuffer[uiIdx], ui32NextPageIndex);
+ }
+
+ /* Copy scratch buffer to PMR */
+ eError = PMR_WriteBytes(psPMR,
+ uiLogicalOffset,
+ (IMG_UINT8 *) psFreeListBuffer,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ &uNumBytesCopied);
+
+ /* Free scratch buffer */
+ OSFreeMem(psFreeListBuffer);
+
+#if defined(PDUMP)
+ /* Pdump the Page tables */
+ PDUMPCOMMENT("Dump %u RPM free page list entries.", ui32PageCount);
+ PMRPDumpLoadMem(psPMR,
+ uiLogicalOffset,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ PDUMP_FLAGS_CONTINUOUS,
+ IMG_FALSE);
+#endif
+ return eError;
+}
+
+
+static RGX_RPM_FREELIST* FindRPMFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_RPM_FREELIST *psFreeList = NULL;
+
+ OSLockAcquire(psDevInfo->hLockRPMFreeList);
+ dllist_foreach_node(&psDevInfo->sRPMFreeListHead, psNode, psNext)
+ {
+ RGX_RPM_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_RPM_FREELIST, sNode);
+
+ if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+ {
+ psFreeList = psThisFreeList;
+ break;
+ }
+ }
+ OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+ return psFreeList;
+}
+
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID)
+{
+ RGX_RPM_FREELIST *psFreeList = NULL;
+ RGXFWIF_KCCB_CMD sVRDMCCBCmd;
+ IMG_UINT32 ui32GrowValue;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bRestartRPM = IMG_TRUE; /* FIXME */
+
+ PVR_ASSERT(psDevInfo);
+
+ /* find the freelist with the corresponding ID */
+ psFreeList = FindRPMFreeList(psDevInfo, ui32FreelistID);
+
+ if (psFreeList)
+ {
+ /* Try to grow the freelist */
+ eError = RGXGrowRPMFreeList(psFreeList,
+ psFreeList->ui32GrowFLPages,
+ &psFreeList->sMemoryBlockHead);
+ if (eError == PVRSRV_OK)
+ {
+ /* Grow successful, return size of grow size */
+ ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+ psFreeList->ui32NumGrowReqByFW++;
+
+ #if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(0,
+ 1, /* Add 1 to the appropriate counter (Requests by FW) */
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+ #endif
+
+ }
+ else
+ {
+ /* Grow failed */
+ ui32GrowValue = 0;
+ PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p [ID %d] failed (error %u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ eError));
+ }
+
+ /* send feedback */
+ sVRDMCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32GrowValue;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32NewSize =
+ ((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+ (psFreeList->ui32CurrentFLPages);
+
+ PVR_DPF((PVR_DBG_ERROR,"Send feedback to RPM after grow on freelist [ID %d]", ui32FreelistID));
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_SHG,
+ &sVRDMCCBCmd,
+ sizeof(sVRDMCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ /* Should never happen */
+ PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+}
+
+
+/*!
+ * RGXGrowRPMFreeList
+ *
+ * Allocate and map physical backing pages for RPM buffers
+ *
+ * @param ppsRPMDevMemDesc - RPM buffer descriptor representing new Scene memory block
+ * and its associated RPM page table and free page list entries
+ * @param psRPMContext - RPM context
+ * @param psFreeList - RPM freelist descriptor
+ * @param ui32RequestNumPages - number of RPM pages to add to Doppler scene hierarchy
+ * @param pListHeader - linked list of RGX_RPM_DEVMEM_DESC blocks
+ *
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32RequestNumPages,
+ PDLLIST_NODE pListHeader)
+{
+ PVRSRV_ERROR eError;
+ RGX_SERVER_RPM_CONTEXT *psRPMContext = psFreeList->psParentCtx;
+ RGX_RPM_DEVMEM_DESC *psRPMDevMemDesc;
+ IMG_DEVMEM_OFFSET_T uiPMROffset;
+ IMG_UINT32 ui32NextPageIndex;
+
+ /* Are we allowed to grow ? */
+ if (ui32RequestNumPages > psFreeList->psParentCtx->ui32UnallocatedPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Scene Hierarchy buffer exceeded (0x%x pages required, 0x%x pages available).",
+ ui32RequestNumPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+ return PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX;
+ }
+
+ /* Allocate descriptor */
+ psRPMDevMemDesc = OSAllocZMem(sizeof(*psRPMDevMemDesc));
+ if (psRPMDevMemDesc == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: failed to allocate host data structure"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ * - the context's ui32UnallocatedPages
+ */
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMContext);
+
+ /* Update the sparse PMRs */
+ psRPMDevMemDesc->psFreeList = psFreeList;
+ psRPMDevMemDesc->ui32NumPages = ui32RequestNumPages;
+ psRPMDevMemDesc->sSceneHierarchyNode.psPMR = psRPMContext->psSceneHierarchyPMR;
+ psRPMDevMemDesc->sRPMPageListNode.psPMR = psRPMContext->psRPMPageTablePMR;
+ psRPMDevMemDesc->sRPMFreeListNode.psPMR = psFreeList->psFreeListPMR;
+
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXGrowRPMFreeList: mapping %d pages for Doppler scene memory to VA 0x%llx with heap ID %p",
+ ui32RequestNumPages, psRPMContext->sSceneMemoryBaseAddr.uiAddr, psRPMContext->psSceneHeap));
+
+ /*
+ * 1. Doppler scene hierarchy
+ */
+ PDUMPCOMMENT("Allocate %d pages with mapping index %d for Doppler scene memory.",
+ ui32RequestNumPages,
+ psRPMContext->ui32SceneMemorySparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sSceneHierarchyNode,
+ psFreeList,
+ NODE_SCENE_HIERARCHY,
+ psRPMContext->psSceneHeap,
+ ui32RequestNumPages,
+ psRPMContext->sSceneMemoryBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM scene hierarchy block (status %d)", eError));
+ goto ErrorSceneBlock;
+ }
+
+ /*
+ * 2. RPM page list
+ */
+ if (ui32RequestNumPages > psRPMContext->ui32RPMEntriesInPage)
+ {
+ /* we need to map in phys pages for RPM page table */
+ PDUMPCOMMENT("Allocate %d (%d requested) page table entries with mapping index %d for RPM page table.",
+ ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+ ui32RequestNumPages,
+ psRPMContext->ui32RPMPageTableSparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMPageListNode,
+ psFreeList,
+ NODE_RPM_PAGE_TABLE,
+ psRPMContext->psRPMPageTableHeap,
+ ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+ psRPMContext->sRPMPageTableBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM page table block (status %d)", eError));
+ goto ErrorPageTableBlock;
+ }
+ }
+
+ /*
+ * 3. Free page list (FPL)
+ */
+ if (ui32RequestNumPages > psFreeList->ui32EntriesInPage)
+ {
+ /* we need to map in phys pages for RPM free page list */
+ PDUMPCOMMENT("Allocate %d (%d requested) FPL entries with mapping index %d for RPM free page list.",
+ ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+ ui32RequestNumPages,
+ psFreeList->ui32RPMFreeListSparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMFreeListNode,
+ psFreeList,
+ NODE_RPM_FREE_PAGE_LIST,
+ psRPMContext->psRPMPageTableHeap,
+ ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+ psFreeList->sBaseDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM free page list (status %d)", eError));
+ goto ErrorFreeListBlock;
+ }
+ }
+
+ /*
+ * Update FPL entries
+ */
+
+ /* Calculate doppler page index from base of Doppler heap */
+ ui32NextPageIndex = (psRPMDevMemDesc->sSceneHierarchyNode.sAddr.uiAddr -
+ psRPMContext->sDopplerHeapBaseAddr.uiAddr) >> psFreeList->uiLog2DopplerPageSize;
+
+ /* Calculate write offset into FPL PMR assuming pages are mapped in order with no gaps */
+ uiPMROffset = (size_t)psFreeList->ui32CurrentFLPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+
+ eError = _WriteRPMFreePageList(psFreeList->psFreeListPMR, uiPMROffset, ui32NextPageIndex, ui32RequestNumPages);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: error writing RPM free list entries (%d)", eError));
+ goto ErrorFreeListWriteEntries;
+ }
+
+ {
+ /*
+ * Update the entries remaining in the last mapped RPM and FPL pages.
+ *
+ * psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk entries are added (can be zero)
+ * ui32RequestNumPages entries are committed
+ *
+ * The number of entries remaining should always be less than a full page.
+ */
+ IMG_UINT32 ui32PTEntriesPerChunk = OSGetPageSize() / sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+ IMG_UINT32 ui32PTEntriesPerChunkClearMask = ~(ui32PTEntriesPerChunk - 1);
+
+ psRPMContext->ui32RPMEntriesInPage = psRPMContext->ui32RPMEntriesInPage +
+ (psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+ PVR_ASSERT((psRPMContext->ui32RPMEntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+
+ psFreeList->ui32EntriesInPage = psFreeList->ui32EntriesInPage +
+ (psRPMDevMemDesc->sRPMFreeListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+ PVR_ASSERT((psFreeList->ui32EntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+ }
+
+ /* Add node to link list */
+ dllist_add_to_head(pListHeader, &psRPMDevMemDesc->sMemoryDescBlock);
+
+ /* Update number of available pages */
+ psFreeList->ui32CurrentFLPages += ui32RequestNumPages;
+ psRPMContext->ui32UnallocatedPages -= ui32RequestNumPages;
+
+#if defined(DEBUG)
+ RGXDumpRPMFreeListPageList(psFreeList);
+#endif
+
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RPM Freelist [%p, ID %d]: grow by %u pages (current pages %u/%u, unallocated pages %u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ ui32RequestNumPages,
+ psFreeList->ui32CurrentFLPages,
+ psRPMContext->ui32TotalRPMPages,
+ psRPMContext->ui32UnallocatedPages));
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+ErrorFreeListWriteEntries:
+ /* TODO: unmap sparse block for RPM FPL */
+ErrorFreeListBlock:
+ /* TODO: unmap sparse block for RPM page table */
+ErrorPageTableBlock:
+ /* TODO: unmap sparse block for scene hierarchy */
+
+ErrorSceneBlock:
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+ OSFreeMem(psRPMDevMemDesc);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR RGXShrinkRPMFreeList(PDLLIST_NODE pListHeader,
+ RGX_RPM_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode;
+ RGX_RPM_DEVMEM_DESC *psRPMDevMemNode;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32OldValue;
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages value
+ */
+ PVR_ASSERT(pListHeader);
+ PVR_ASSERT(psFreeList);
+ PVR_ASSERT(psFreeList->psDevInfo);
+ PVR_ASSERT(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ /********************************************************************
+ * All scene memory blocks must be freed together as non-contiguous
+ * virtual mappings are not yet supported.
+ ********************************************************************/
+
+ /* Get node from head of list and remove it */
+ psNode = dllist_get_next_node(pListHeader);
+ PVR_DPF((PVR_DBG_MESSAGE, "Found node %p", psNode));
+ if (psNode)
+ {
+ dllist_remove_node(psNode);
+
+ psRPMDevMemNode = IMG_CONTAINER_OF(psNode, RGX_RPM_DEVMEM_DESC, sMemoryDescBlock);
+ PVR_ASSERT(psRPMDevMemNode);
+ PVR_ASSERT(psRPMDevMemNode->psFreeList);
+ PVR_ASSERT(psRPMDevMemNode->sSceneHierarchyNode.psPMR);
+
+ /* remove scene hierarchy block */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing scene hierarchy node"));
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sSceneHierarchyNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->psParentCtx->sSceneMemoryBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sSceneHierarchyNode.ui32NumPhysPages,
+ psRPMDevMemNode->sSceneHierarchyNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+
+ /*
+ * If the grow size is sub OS page size then the page lists may not need updating
+ */
+ if (psRPMDevMemNode->sRPMPageListNode.eNodeType != NODE_EMPTY)
+ {
+ /* unmap the RPM page table backing pages */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM page list node"));
+ PVR_ASSERT(psRPMDevMemNode->sRPMPageListNode.psPMR);
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMPageListNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->psParentCtx->sRPMPageTableBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sRPMPageListNode.ui32NumPhysPages,
+ psRPMDevMemNode->sRPMPageListNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+ }
+
+ if (psRPMDevMemNode->sRPMFreeListNode.eNodeType != NODE_EMPTY)
+ {
+ /* unmap the RPM free page list backing pages */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM free list node"));
+ PVR_ASSERT(psRPMDevMemNode->sRPMFreeListNode.psPMR);
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMFreeListNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->sBaseDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sRPMFreeListNode.ui32NumPhysPages,
+ psRPMDevMemNode->sRPMFreeListNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+ }
+
+ /* update available RPM pages in freelist (NOTE: may be different from phys page count) */
+ ui32OldValue = psFreeList->ui32CurrentFLPages;
+ psFreeList->ui32CurrentFLPages -= psRPMDevMemNode->ui32NumPages;
+
+ /* check underflow */
+ PVR_ASSERT(ui32OldValue > psFreeList->ui32CurrentFLPages);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p, ID %d]: shrink by %u pages (current pages %u/%u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ psRPMDevMemNode->ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->psParentCtx->ui32UnallocatedPages));
+
+ OSFreeMem(psRPMDevMemNode);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at zero PB size (%u pages)",
+ psFreeList,
+ psFreeList->ui32CurrentFLPages));
+ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+ }
+
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+ return PVRSRV_OK;
+
+UnMapError:
+ OSFreeMem(psRPMDevMemNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*!
+ * _RGXCreateRPMSparsePMR
+ *
+ * Creates a PMR container with no phys pages initially. Phys pages will be allocated
+ * and mapped later when requested by client or by HW RPM Out of Memory event.
+ * The PMR is created with zero phys backing pages.
+ * The sparse PMR is associated to either the RPM context or to the RPM freelist(s):
+ *
+ * RGX_SERVER_RPM_CONTEXT - Scene hierarchy, page table
+ * RGX_RPM_FREELIST - free page list PMR
+ *
+ * @param eBlockType - whether block is for scene hierarchy pages or page
+ * tables. This parameter is used to calculate size.
+ * @param ui32NumPages - total number of pages
+ * @param uiLog2DopplerPageSize - log2 Doppler/RPM page size
+ * @param ppsPMR - (Output) new PMR container.
+ *
+ * See the documentation for more details.
+ */
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ IMG_UINT32 ui32NumPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ PMR **ppsPMR)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiMaxSize = 0;
+ IMG_UINT32 ui32NumVirtPages = 0; /*!< number of virtual pages to cover virtual range */
+ IMG_UINT32 ui32Log2OSPageSize = OSGetPageShift();
+ IMG_UINT32 ui32ChunkSize = OSGetPageSize();
+ PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+ /* Work out the allocation logical size = virtual size */
+ switch (eBlockType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Allocate Scene Hierarchy PMR (Pages %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * (1 << uiLog2DopplerPageSize);
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Allocate RPM Page Table PMR (Page entries %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ /*
+ * Each RPM free page list (FPL) supports the maximum range.
+ * In practice the maximum range is divided between allocations in each FPL.
+ */
+ PDUMPCOMMENT("Allocate RPM Free Page List PMR (Page entries %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+ uiCustomFlags |= PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; /*(PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); */
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+
+ uiMaxSize = (uiMaxSize + ui32ChunkSize - 1) & ~(ui32ChunkSize - 1);
+ ui32NumVirtPages = uiMaxSize >> ui32Log2OSPageSize;
+
+ eError = PhysmemNewRamBackedPMR(psConnection,
+ psDeviceNode,
+ uiMaxSize, /* the maximum size which should match num virtual pages * page size */
+ ui32ChunkSize,
+ 0,
+ ui32NumVirtPages,
+ NULL,
+ ui32Log2OSPageSize,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | uiCustomFlags),
+ strlen("RPM Buffer") + 1,
+ "RPM Buffer",
+ ppsPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_RGXCreateRPMSparsePMR: Failed to allocate sparse PMR of size: 0x%016llX",
+ (IMG_UINT64)uiMaxSize));
+ }
+
+ return eError;
+}
+
+/*!
+ * _RGXMapRPMPBBlock
+ *
+ * Maps in a block of phys pages for one of the following:
+ *
+ * NODE_SCENE_HIERARCHY - scene hierarchy
+ * NODE_RPM_PAGE_TABLE - RPM page table entries
+ * NODE_RPM_FREE_PAGE_LIST - RPM free page list entries
+ *
+ * @param psDevMemNode - device mem block descriptor (allocated by caller)
+ * @param psFreeList - free list descriptor
+ * @param eBlockType - block type: scene memory, RPM page table or RPM page free list
+ * @param psDevmemHeap - heap for GPU virtual mapping
+ * @param ui32NumPages - number of pages for scene memory, OR
+ * number of PT entries for RPM page table or page free list
+ * @param sDevVAddrBase - GPU virtual base address i.e. base address at start of sparse allocation
+ *
+ * @return PVRSRV_OK if no error occurred
+ */
+static
+PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 sCpuVAddrNULL = 0; /* no CPU mapping needed */
+ IMG_UINT32 *paui32AllocPageIndices; /* table of virtual indices for sparse mapping */
+ IMG_PUINT32 pui32MappingIndex = NULL; /* virtual index where next physical chunk is mapped */
+ IMG_UINT32 i;
+ size_t uiSize = 0;
+ IMG_UINT32 ui32Log2OSPageSize = OSGetPageShift();
+ IMG_UINT32 ui32ChunkSize = OSGetPageSize();
+ IMG_UINT32 ui32NumPhysPages = 0; /*!< number of physical pages for data pages or RPM PTs */
+ PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+
+ /* Allocate Memory Block for scene hierarchy */
+ switch (eBlockType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Allocate Scene Hierarchy Block (Pages %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * (1 << psFreeList->psParentCtx->uiLog2DopplerPageSize);
+ pui32MappingIndex = &psFreeList->psParentCtx->ui32SceneMemorySparseMappingIndex;
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Allocate RPM Page Table Block (Page entries %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+ pui32MappingIndex = &psFreeList->psParentCtx->ui32RPMPageTableSparseMappingIndex;
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ PDUMPCOMMENT("Allocate RPM Free Page List Block (Page entries %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+ pui32MappingIndex = &psFreeList->ui32RPMFreeListSparseMappingIndex;
+ uiCustomFlags |= PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE; /*(PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE);*/
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+
+ /*
+ * Round size up to multiple of the sparse chunk size = OS page size.
+ */
+ uiSize = (uiSize + ui32ChunkSize - 1) & ~(ui32ChunkSize - 1);
+ ui32NumPhysPages = uiSize >> ui32Log2OSPageSize;
+
+ paui32AllocPageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+ if (paui32AllocPageIndices == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXCreateRPMPBBlockSparse: failed to allocate sparse mapping index list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+ for (i = 0; i < ui32NumPhysPages; i++)
+ {
+ paui32AllocPageIndices[i] = *pui32MappingIndex + i;
+ }
+
+ /* Set up some state */
+ psDevMemNode->eNodeType = eBlockType;
+ psDevMemNode->psDevMemHeap = psDevmemHeap;
+ if (eBlockType == NODE_SCENE_HIERARCHY)
+ {
+ /* the mapped-in scene hierarchy device address will be used to set up the FPL entries */
+ psDevMemNode->sAddr.uiAddr = sDevVAddrBase.uiAddr + (*pui32MappingIndex * ui32ChunkSize);
+ }
+ psDevMemNode->ui32NumPhysPages = ui32NumPhysPages;
+ psDevMemNode->ui32StartOfMappingIndex = *pui32MappingIndex;
+
+ {
+ if ((eBlockType == NODE_SCENE_HIERARCHY) &&
+ (ui32NumPhysPages > psFreeList->psParentCtx->ui32UnallocatedPages))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXCreateRPMPBBlockSparse: virtual address space exceeded (0x%x pages required, 0x%x pages available).",
+ ui32NumPhysPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+ OSFreeMem(paui32AllocPageIndices);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXCreateRPMPBBlockSparse: unable to lock PMR physical pages (status %d)", eError));
+ goto ErrorLockPhys;
+ }
+
+ eError = DevmemIntChangeSparse(psDevmemHeap,
+ psDevMemNode->psPMR,
+ ui32NumPhysPages,
+ paui32AllocPageIndices,
+ 0,
+ NULL,
+ SPARSE_RESIZE_ALLOC,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | uiCustomFlags),
+ sDevVAddrBase,
+ sCpuVAddrNULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXCreateRPMPBBlockSparse: change sparse mapping failed with %d pages starting at %d (status %d)",
+ ui32NumPhysPages, *pui32MappingIndex, eError));
+ goto ErrorSparseMapping;
+ }
+
+ /* FIXME: leave locked until destroy */
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+ }
+
+ /*
+ * Update the mapping index for the next allocation.
+ * The virtual pages should be contiguous.
+ */
+ *pui32MappingIndex += ui32NumPhysPages;
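+ /*
+ * Illustrative example with hypothetical values: if *pui32MappingIndex was 4
+ * and ui32NumPhysPages is 3, this block occupied virtual indices 4..6 and the
+ * next block will start mapping at index 7.
+ */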
+
+ OSFreeMem(paui32AllocPageIndices);
+
+ return PVRSRV_OK;
+
+ErrorSparseMapping:
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ErrorLockPhys:
+ OSFreeMem(paui32AllocPageIndices);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*!
+ * _RGXUnmapRPMPBBlock
+ *
+ * NOTE: because the SHF and SHG requests for memory are interleaved, the
+ * page mapping offset cannot be updated (non-contiguous virtual mapping
+ * is not supported).
+ *
+ * So either:
+ * (i) the allocated virtual address range is unusable after unmap, or
+ * (ii) all of the scene memory must be freed.
+ *
+ * @param psDevMemNode - block to free
+ * @param psFreeList - RPM free list
+ * @param sDevVAddrBase - the virtual base address (i.e. where page 1 of the PMR is mapped)
+ */
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 sCpuVAddrNULL = 0; /* no CPU mapping needed */
+ IMG_UINT32 *paui32FreePageIndices; /* table of virtual indices for sparse unmapping */
+ IMG_UINT32 i;
+ IMG_UINT32 ui32NumPhysPages = psDevMemNode->ui32NumPhysPages; /*!< number of physical pages for data pages or RPM PTs */
+
+#if defined(PDUMP)
+ /* Free Memory Block for scene hierarchy */
+ switch(psDevMemNode->eNodeType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Free Scene Hierarchy Block (Pages %08X)", ui32NumPhysPages);
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Free RPM Page Table Block (Page entries %08X)", ui32NumPhysPages);
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ PDUMPCOMMENT("Free RPM Free Page List Block (Page entries %08X)", ui32NumPhysPages);
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+#endif
+
+ paui32FreePageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+ if (paui32FreePageIndices == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: failed to allocate sparse mapping index list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+ for(i=0; i<ui32NumPhysPages; i++)
+ {
+ paui32FreePageIndices[i] = psDevMemNode->ui32StartOfMappingIndex + i;
+ }
+
+ {
+ eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: unable to lock PMR physical pages (status %d)", eError));
+ goto ErrorLockPhys;
+ }
+
+ eError = DevmemIntChangeSparse(psDevMemNode->psDevMemHeap,
+ psDevMemNode->psPMR,
+ 0, /* no pages are mapped here */
+ NULL,
+ ui32NumPhysPages,
+ paui32FreePageIndices,
+ SPARSE_RESIZE_FREE,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE),
+ sDevVAddrBase,
+ sCpuVAddrNULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: free sparse mapping failed with %d pages starting at %d (status %d)",
+ ui32NumPhysPages, psDevMemNode->ui32StartOfMappingIndex, eError));
+ goto ErrorSparseMapping;
+ }
+
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+ }
+
+ OSFreeMem(paui32FreePageIndices);
+
+ return PVRSRV_OK;
+
+ErrorSparseMapping:
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ErrorLockPhys:
+ OSFreeMem(paui32FreePageIndices);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*!
+ * RGXCreateRPMFreeList
+ *
+ * @param ui32InitFLPages - initial allocation of mapped-in physical pages
+ * @param ui32GrowFLPages - physical pages to add to scene hierarchy if RPM OOM occurs
+ * @param sFreeListDevVAddr - virtual base address of free list
+ * @param sRPMPageListDevVAddr (DEPRECATED -- cached in RPM Context)
+ * @param ui32FLSyncAddr (DEPRECATED)
+ * @param ppsFreeList - returns a RPM freelist handle to client
+ * @param puiHWFreeList - 'handle' to FW freelist, passed in VRDM kick (FIXME)
+ * @param bIsExternal - flag which marks if the freelist is an external one
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT *psRPMContext,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ RGX_RPM_FREELIST **ppsFreeList,
+ IMG_UINT32 *puiHWFreeList,
+ IMG_BOOL bIsExternal)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_RPM_FREELIST *psFWRPMFreeList;
+ DEVMEM_MEMDESC *psFWRPMFreelistMemDesc;
+ RGX_RPM_FREELIST *psFreeList;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Allocate kernel freelist struct */
+ psFreeList = OSAllocZMem(sizeof(*psFreeList));
+ if (psFreeList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psFreeList->psCleanupSync,
+ "RPM free list cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMFreeList: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /*
+ * This FW FreeList context is only mapped into kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+ * and write-combine suffices on the CPU side (the WC buffer will be flushed at the first TA kick)
+ *
+ * TODO - RPM freelist will be modified after creation, but only from host-side.
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWRPMFreeList),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwRPMFreeList",
+ &psFWRPMFreelistMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: DevmemAllocate for RGXFWIF_FREELIST failed"));
+ goto ErrorFWFreeListAlloc;
+ }
+
+ /* Initialise host data structures */
+ psFreeList->psConnection = psConnection;
+ psFreeList->psDevInfo = psDevInfo;
+ psFreeList->psParentCtx = psRPMContext;
+ psFreeList->psFWFreelistMemDesc = psFWRPMFreelistMemDesc;
+ psFreeList->sBaseDevVAddr = sFreeListDevVAddr;
+ RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWRPMFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ psFreeList->ui32FreelistID = psDevInfo->ui32RPMFreelistCurrID++;
+ //psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+ /* TODO: is it really needed? */
+ if(bIsExternal == IMG_FALSE)
+ {
+ psFreeList->ui32InitFLPages = ui32InitFLPages;
+ psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+ }
+ //psFreeList->ui32CurrentFLPages = ui32InitFLPages;
+ psFreeList->ui32RefCount = 0;
+ dllist_init(&psFreeList->sMemoryBlockHead);
+
+ /* Wizard2 -- support per-freelist Doppler virtual page size */
+ psFreeList->uiLog2DopplerPageSize = psRPMContext->uiLog2DopplerPageSize;
+
+ /* Initialise FW data structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWRPMFreeList);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFWFreeListCpuMap);
+
+ /*
+ * FIXME - the max pages are shared with the other freelists so this
+ * over-estimates the number of free pages. The full check is
+ * implemented in RGXGrowRPMFreeList.
+ */
+ if(bIsExternal == IMG_TRUE)
+ {
+ /* An external RPM FreeList will never grow */
+ psFWRPMFreeList->ui32MaxPages = ui32InitFLPages;
+ }
+ else
+ {
+ psFWRPMFreeList->ui32MaxPages = psFreeList->psParentCtx->ui32TotalRPMPages;
+ }
+ psFWRPMFreeList->ui32CurrentPages = ui32InitFLPages;
+ psFWRPMFreeList->ui32GrowPages = ui32GrowFLPages;
+ psFWRPMFreeList->ui32ReadOffset = 0;
+ psFWRPMFreeList->ui32WriteOffset = RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN; /* FL is full */
+ psFWRPMFreeList->bReadToggle = IMG_FALSE;
+ psFWRPMFreeList->bWriteToggle = IMG_TRUE;
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr = sFreeListDevVAddr.uiAddr;
+ psFWRPMFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+ psFWRPMFreeList->bGrowPending = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RPM Freelist %p created: FW freelist: %p, Init pages 0x%08x, Max FL base address " IMG_DEVMEM_SIZE_FMTSPEC ", Init FL base address " IMG_DEVMEM_SIZE_FMTSPEC,
+ psFreeList,
+ psFWRPMFreeList,
+ ui32InitFLPages,
+ sFreeListDevVAddr.uiAddr,
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr));
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RPM FW Freelist %p created: sync FW addr 0x%08x", psFWRPMFreeList, psFWRPMFreeList->sSyncAddr));
+
+ PDUMPCOMMENT("Dump FW RPM FreeList");
+ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWRPMFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+ /*
+ * Separate dump of the Freelist's number of Pages and stack pointer.
+ * This allows the PB size to be modified easily in the out2.txt files.
+ */
+ PDUMPCOMMENT("RPM FreeList TotalPages");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages),
+ psFWRPMFreeList->ui32CurrentPages,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("RPM FreeList device virtual base address");
+ DevmemPDumpLoadMemValue64(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr),
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ if (bIsExternal == IMG_TRUE)
+ {
+ /* Mark the freelist as external */
+ psFreeList->bIsExternal = IMG_TRUE;
+
+ /* For an external RPM FreeList there is no need to:
+ * - create a sparse PMR
+ * - allocate physical memory for the freelist
+ * - add it to the list of freelists
+ */
+
+ /* return values */
+ *puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+ }
+
+ psFreeList->bIsExternal = IMG_FALSE;
+
+ /*
+ * Create the sparse PMR for the RPM free page list
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_RPM_FREE_PAGE_LIST,
+ psRPMContext->ui32TotalRPMPages,
+ psRPMContext->uiLog2DopplerPageSize,
+ &psFreeList->psFreeListPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for RPM Free page list (%d)", eError));
+ goto ErrorSparsePMR;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ */
+ /* Add to list of freelists */
+ OSLockAcquire(psDevInfo->hLockRPMFreeList);
+ psFreeList->psParentCtx->uiFLRefCount++;
+ dllist_add_to_tail(&psDevInfo->sRPMFreeListHead, &psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+ /*
+ * Add initial scene hierarchy block
+ * Allocate phys memory for scene hierarchy, free page list and RPM page-in-use list
+ */
+ eError = RGXGrowRPMFreeList(psFreeList, ui32InitFLPages, &psFreeList->sMemoryBlockHead);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: error during phys memory allocation and mapping (%d)", eError));
+ goto ErrorGrowFreeList;
+ }
+
+ /* return values */
+ *puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+
+ /* Error handling: unwind in reverse order of allocation */
+ErrorGrowFreeList:
+ /* Remove the freelist from the list of freelists */
+ OSLockAcquire(psDevInfo->hLockRPMFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ psFreeList->psParentCtx->uiFLRefCount--;
+ OSLockRelease(psDevInfo->hLockRPMFreeList);
+ PMRUnrefPMR(psFreeList->psFreeListPMR);
+
+ErrorSparsePMR:
+ErrorFWFreeListCpuMap:
+ RGXUnsetFirmwareAddress(psFWRPMFreelistMemDesc);
+ DevmemFwFree(psDevInfo, psFWRPMFreelistMemDesc);
+
+ErrorFWFreeListAlloc:
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ErrorSyncAlloc:
+ OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
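+
+/*
+ * Illustrative (non-normative) call sequence for creating and destroying an
+ * internal RPM freelist; the local names and page counts below are
+ * hypothetical and error handling is omitted:
+ *
+ *	RGX_RPM_FREELIST *psFL;
+ *	IMG_UINT32 uiHWFreeList;
+ *
+ *	eError = RGXCreateRPMFreeList(psConnection, psDeviceNode, psRPMContext,
+ *	                              1024,              // ui32InitFLPages
+ *	                              256,               // ui32GrowFLPages
+ *	                              sFreeListDevVAddr,
+ *	                              &psFL, &uiHWFreeList,
+ *	                              IMG_FALSE);        // not an external freelist
+ *	...
+ *	eError = RGXDestroyRPMFreeList(psFL);  // may return PVRSRV_ERROR_RETRY
+ */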
+
+/*
+ * RGXDestroyRPMFreeList
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList)
+{
+ PVRSRV_ERROR eError;
+ //IMG_UINT64 ui64CheckSum;
+
+ PVR_ASSERT(psFreeList);
+
+ if(psFreeList->ui32RefCount != 0 && psFreeList->bIsExternal == IMG_FALSE)
+ {
+ /* Freelist still busy */
+ PVR_DPF((PVR_DBG_WARNING, "Freelist %p is busy", psFreeList));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Freelist is not in use => start firmware cleanup */
+ eError = RGXFWRequestRPMFreeListCleanUp(psFreeList->psDevInfo,
+ psFreeList->sFreeListFWDevVAddr,
+ psFreeList->psCleanupSync);
+ if(eError != PVRSRV_OK)
+ {
+ /* This can happen if the firmware took too long to handle the cleanup request,
+ * or if the SLC flushes didn't go through (due to a GPU lockup) */
+ return eError;
+ }
+
+ /* update the statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateFreelistStats(psFreeList->ui32NumGrowReqByApp,
+ psFreeList->ui32NumGrowReqByFW,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ 0); /* FIXME - owner PID */
+#endif
+
+ /* Destroy FW structures */
+ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+ DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+ if(psFreeList->bIsExternal == IMG_FALSE)
+ {
+ /* Free the phys mem block descriptors. */
+ PVR_DPF((PVR_DBG_WARNING, "Cleaning RPM freelist index %d", psFreeList->ui32FreelistID));
+ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+ {
+ eError = RGXShrinkRPMFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ psFreeList->psParentCtx->uiFLRefCount--;
+
+ /* consistency checks */
+ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockHead));
+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+ /* Free RPM Free page list PMR */
+ eError = PMRUnrefPMR(psFreeList->psFreeListPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMFreeList: Failed to free RPM free page list PMR %p (error %u)",
+ psFreeList->psFreeListPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* Remove RPM FreeList from list */
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+ }
+
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ /* free Freelist */
+ OSFreeMem(psFreeList);
+
+ return eError;
+}
+
+
+/*!
+ * RGXAddBlockToRPMFreeListKM
+ *
+ * NOTE: This API isn't used but it's provided for symmetry with the parameter
+ * management API.
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if we have reference to freelist's PMR */
+ if (psFreeList->psFreeListPMR == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RPM Freelist is not configured for grow"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* grow freelist */
+ eError = RGXGrowRPMFreeList(psFreeList,
+ ui32NumPages,
+ &psFreeList->sMemoryBlockHead);
+ if(eError == PVRSRV_OK)
+ {
+ /* update freelist data in firmware */
+ _UpdateFwRPMFreelistSize(psFreeList, IMG_TRUE, IMG_TRUE, ui32NumPages);
+
+ psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ 0,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+ }
+
+ return eError;
+}
+
+
+/*
+ * RGXCreateRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT **ppsRPMContext,
+ IMG_UINT32 ui32TotalRPMPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr,
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr,
+ DEVMEMINT_HEAP *psSceneHeap,
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr,
+ DEVMEMINT_HEAP *psRPMPageTableHeap,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWFrameData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ //DEVMEM_MEMDESC *psFWRPMContextMemDesc;
+ RGX_SERVER_RPM_CONTEXT *psRPMContext;
+ RGXFWIF_RAY_FRAME_DATA *psFrameData;
+ RGXFWIF_DEV_VIRTADDR sFirmwareAddr;
+
+ /* Allocate kernel RPM context */
+ psRPMContext = OSAllocZMem(sizeof(*psRPMContext));
+ if (psRPMContext == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ *ppsRPMContext = psRPMContext;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRPMContext->psCleanupSync,
+ "RPM context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMContext: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /*
+ * 1. Create the sparse PMR for scene hierarchy
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_SCENE_HIERARCHY,
+ ui32TotalRPMPages,
+ uiLog2DopplerPageSize,
+ &psRPMContext->psSceneHierarchyPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for Scene hierarchy (%d)", eError));
+ goto ErrorSparsePMR1;
+ }
+
+ /*
+ * 2. Create the sparse PMR for the RPM page list
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_RPM_PAGE_TABLE,
+ ui32TotalRPMPages,
+ uiLog2DopplerPageSize,
+ &psRPMContext->psRPMPageTablePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for RPM Page list (%d)", eError));
+ goto ErrorSparsePMR2;
+ }
+
+ /* Allocate FW structure and return FW address to client */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFrameData),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwRPMContext",
+ ppsMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: DevmemAllocate for RGXFWIF_FREELIST failed"));
+ goto ErrorFWRPMContextAlloc;
+ }
+
+ /* Update the unallocated pages, which are shared between the RPM freelists */
+ psRPMContext->ui32UnallocatedPages = psRPMContext->ui32TotalRPMPages = ui32TotalRPMPages;
+ psRPMContext->psDeviceNode = psDeviceNode;
+ psRPMContext->psFWRPMContextMemDesc = *ppsMemDesc;
+ psRPMContext->uiLog2DopplerPageSize = uiLog2DopplerPageSize;
+
+ /* Cache the virtual alloc state for future phys page mapping */
+ psRPMContext->sDopplerHeapBaseAddr = sDopplerHeapBaseAddr;
+ psRPMContext->sSceneMemoryBaseAddr = sSceneMemoryBaseAddr;
+ psRPMContext->psSceneHeap = psSceneHeap;
+ psRPMContext->sRPMPageTableBaseAddr = sRPMPageTableBaseAddr;
+ psRPMContext->psRPMPageTableHeap = psRPMPageTableHeap;
+
+ /*
+ * TODO - implement RPM abort control using HW frame data to track
+ * abort status in RTU.
+ */
+ RGXSetFirmwareAddress(&sFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *puiHWFrameData = sFirmwareAddr.ui32Addr;
+
+ //eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psFrameData);
+ //PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFrameDataCpuMap);
+
+ /*
+ * TODO: pdumping
+ */
+
+
+ return PVRSRV_OK;
+
+ErrorFWRPMContextAlloc:
+ PMRUnrefPMR(psRPMContext->psRPMPageTablePMR);
+
+ErrorSparsePMR2:
+ PMRUnrefPMR(psRPMContext->psSceneHierarchyPMR);
+
+ErrorSparsePMR1:
+ SyncPrimFree(psRPMContext->psCleanupSync);
+
+ErrorSyncAlloc:
+ OSFreeMem(psRPMContext);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * RGXDestroyRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PRGXFWIF_RAY_FRAME_DATA psFrameData;
+
+ /* Wait for FW to process all commands */
+
+ PVR_ASSERT(psCleanupData);
+
+ RGXSetFirmwareAddress(&psFrameData, psCleanupData->psFWRPMContextMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+ /* Cleanup frame data in SHG */
+ eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+ psFrameData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_SHG);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "FrameData busy in SHG"));
+ return eError;
+ }
+
+ psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+ /* Cleanup frame data in RTU */
+ eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+ psFrameData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_RTU);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "FrameData busy in RTU"));
+ return eError;
+ }
+
+ /* Free Scene hierarchy PMR (We should be the only one that holds a ref on the PMR) */
+ eError = PMRUnrefPMR(psCleanupData->psSceneHierarchyPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMContext: Failed to free scene hierarchy PMR %p (error %u)",
+ psCleanupData->psSceneHierarchyPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* Free RPM Page list PMR */
+ eError = PMRUnrefPMR(psCleanupData->psRPMPageTablePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMContext: Failed to free RPM page list PMR %p (error %u)",
+ psCleanupData->psRPMPageTablePMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ if (psCleanupData->uiFLRefCount > 0)
+ {
+ /* Kernel RPM freelists hold reference to RPM context */
+ PVR_DPF((PVR_DBG_WARNING, "RGXDestroyRPMContext: Free list ref count non-zero."));
+ return PVRSRV_ERROR_NONZERO_REFCOUNT;
+ }
+
+ /* If we got here then SHG and RTU operations on this FrameData have finished */
+ SyncPrimFree(psCleanupData->psCleanupSync);
+
+ /* Free the FW RPM descriptor */
+ RGXUnsetFirmwareAddress(psCleanupData->psFWRPMContextMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psFWRPMContextMemDesc);
+
+ OSFreeMem(psCleanupData);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXCreateRayContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pabyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RAY_CONTEXT **ppsRayContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_RAY_CONTEXT *psRayContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ RGXFWIF_FWRAYCONTEXT *pFWRayContext;
+ IMG_UINT32 i;
+
+ /* Prepare cleanup structure */
+ *ppsRayContext= NULL;
+ psRayContext = OSAllocZMem(sizeof(*psRayContext));
+ if (psRayContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRayContext->psDeviceNode = psDeviceNode;
+
+ /*
+ Allocate device memory for the firmware ray context.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware ray context");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWRAYCONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRayContext",
+ &psRayContext->psFWRayContextMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware ray context (%u)",
+ eError));
+ goto fail_fwraycontext;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRayContext->psCleanupSync,
+ "Ray context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, &psRayContext->psFWFrameworkMemDesc, ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psRayContext->psFWFrameworkMemDesc, pabyFrameworkRegisters, ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psRayContext->psFWFrameworkMemDesc;
+ sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+ eError = _CreateSHContext(psConnection,
+ psDeviceNode,
+ psRayContext->psFWRayContextMemDesc,
+ offsetof(RGXFWIF_FWRAYCONTEXT, sSHGContext),
+ psFWMemContextMemDesc,
+ sVRMCallStackAddr,
+ ui32Priority,
+ &sInfo,
+ &psRayContext->sSHData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_shcontext;
+ }
+
+ eError = _CreateRSContext(psConnection,
+ psDeviceNode,
+ psRayContext->psFWRayContextMemDesc,
+ offsetof(RGXFWIF_FWRAYCONTEXT, sRTUContext),
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psRayContext->sRSData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_rscontext;
+ }
+
+ /*
+ Temporarily map the firmware context to the kernel and init it
+ */
+ eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc,
+ (void **)&pFWRayContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s ray context to CPU",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+
+ for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+ {
+ /* Allocate the frame context client CCB */
+ eError = RGXCreateCCB(psDevInfo,
+ RGX_RTU_CCB_SIZE_LOG2,
+ psConnection,
+ REQ_TYPE_FC0 + i,
+ psRayContext->sRSData.psServerCommonContext,
+ &psRayContext->sRSData.psFCClientCCB[i],
+ &psRayContext->sRSData.psFCClientCCBMemDesc[i],
+ &psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for frame context %u (%s)",
+ __FUNCTION__,
+ i,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+ /* Set the firmware CCB device addresses in the firmware common context */
+ RGXSetFirmwareAddress(&pFWRayContext->psCCB[i],
+ psRayContext->sRSData.psFCClientCCBMemDesc[i],
+ 0, RFW_FWADDR_FLAG_NONE);
+ RGXSetFirmwareAddress(&pFWRayContext->psCCBCtl[i],
+ psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i],
+ 0, RFW_FWADDR_FLAG_NONE);
+ }
+
+ pFWRayContext->ui32ActiveFCMask = 0;
+ pFWRayContext->ui32NextFC = RGXFWIF_INVALID_FRAME_CONTEXT;
+
+ /* We've finished the setup so release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
+
+ /*
+ As the common context alloc will dump the SH and RS common contexts
+ after they've been set up, we skip over the 2 common contexts and dump the
+ rest of the structure
+ */
+ PDUMPCOMMENT("Dump shared part of ray context");
+ DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc,
+ (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+ sizeof(RGXFWIF_FWRAYCONTEXT) - (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+
+ *ppsRayContext= psRayContext;
+ return PVRSRV_OK;
+
+fail_rscontext:
+ _DestroySHContext(&psRayContext->sSHData,
+ psDeviceNode,
+ psRayContext->psCleanupSync);
+fail_shcontext:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ SyncPrimFree(psRayContext->psCleanupSync);
+fail_syncalloc:
+ DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+fail_fwraycontext:
+ OSFreeMem(psRayContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXDestroyRayContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice;
+
+ /* Remove the node from the list before calling destroy - as destroy, if
+ * successful, will invalidate the node; it must be re-added if destroy
+ * fails.
+ */
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_remove_node(&(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+
+ /* Clean up the SH context if we haven't already */
+ if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_SH_COMPLETE) == 0)
+ {
+ eError = _DestroySHContext(&psRayContext->sSHData,
+ psRayContext->psDeviceNode,
+ psRayContext->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ psRayContext->ui32CleanupStatus |= RAY_CLEANUP_SH_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+ /* Clean up the RS context if we haven't already */
+ if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_RS_COMPLETE) == 0)
+ {
+ eError = _DestroyRSContext(&psRayContext->sRSData,
+ psRayContext->psDeviceNode,
+ psRayContext->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ psRayContext->ui32CleanupStatus |= RAY_CLEANUP_RS_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+#if 0
+ /*
+ * FIXME - De-allocate RPM freelists (should be called from UM)
+ */
+ RGXDestroyRPMFreeList(psRayContext->sSHData.psSHFFreeList);
+ RGXDestroyRPMFreeList(psRayContext->sSHData.psSHGFreeList);
+#endif
+
+ for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+ {
+ RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBMemDesc[i]);
+ RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+ RGXDestroyCCB(psDevInfo, psRayContext->sRSData.psFCClientCCB[i]);
+ }
+
+ /*
+ Only if both the SH and RS contexts have been cleaned up can we
+ free the shared resources
+ */
+ if (psRayContext->ui32CleanupStatus == (RAY_CLEANUP_RS_COMPLETE | RAY_CLEANUP_SH_COMPLETE))
+ {
+ /* Free the framework buffer */
+ DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+
+ /* Free the firmware ray context */
+ DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+
+ /* Free the cleanup sync */
+ SyncPrimFree(psRayContext->psCleanupSync);
+
+ OSFreeMem(psRayContext);
+ }
+
+ return PVRSRV_OK;
+
+e0:
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+ return eError;
+}
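+
+/*
+ * Note: PVRSRVRGXDestroyRayContextKM may return PVRSRV_ERROR_RETRY while the
+ * firmware is still using the SH or RS context; the context is then re-added
+ * to the device list and destruction must be attempted again later. A minimal
+ * (hypothetical) caller-side sketch, ignoring back-off/timeout handling:
+ *
+ *	do
+ *	{
+ *		eError = PVRSRVRGXDestroyRayContextKM(psRayContext);
+ *	} while (eError == PVRSRV_ERROR_RETRY);
+ */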
+
+/*
+ * PVRSRVRGXKickRSKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32FCCmdSize,
+ IMG_PBYTE pui8FCDMCmd,
+ IMG_UINT32 ui32FrameContextID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef)
+{
+ RGXFWIF_KCCB_CMD sRSKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asRSCmdHelperData[1] = {{0}};
+ RGX_CCB_CMD_HELPER_DATA asFCCmdHelperData[1] = {{0}};
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError1;
+ PVRSRV_ERROR eError2;
+ RGX_SERVER_RAY_RS_DATA *psRSData = &psRayContext->sRSData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32FCWoff;
+ IMG_UINT32 ui32RTUCmdOffset = 0;
+ IMG_UINT32 ui32JobId;
+ IMG_UINT32 ui32FWCtx;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ ui32JobId = OSAtomicIncrement(&psRayContext->hJobId);
+
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on RS) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+
+ if(pui8DMCmd != NULL)
+ {
+ eError = RGXCmdHelperInitCmdCCB(psRSData->psFCClientCCB[ui32FrameContextID],
+ 0,
+ NULL,
+ NULL,
+ ui32ClientUpdateCount,
+ psRayContext->sSyncAddrListUpdate.pasFWAddrs,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_RTU,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "FC",
+ asFCCmdHelperData);
+ }
+ else
+ {
+ eError = RGXCmdHelperInitCmdCCB(psRSData->psFCClientCCB[ui32FrameContextID],
+ 0,
+ NULL,
+ NULL,
+ ui32ClientUpdateCount,
+ psRayContext->sSyncAddrListUpdate.pasFWAddrs,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_NULL,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "FC",
+ asFCCmdHelperData);
+
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickRSKM_Exit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+ asFCCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickRSKM_Exit;
+ }
+
+ ui32FCWoff = RGXCmdHelperGetCommandSize(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+ asFCCmdHelperData);
+
+ *(IMG_UINT32*)pui8FCDMCmd = RGXGetHostWriteOffsetCCB(psRSData->psFCClientCCB[ui32FrameContextID]) + ui32FCWoff;
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return with
+ retry back to the services client before we take any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet so only submit
+ the command if the create was successful
+ */
+ eError1 = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext),
+ ui32ClientFenceCount,
+ psRayContext->sSyncAddrListFence.pasFWAddrs,
+ paui32ClientFenceValue,
+ 0,
+ NULL,
+ NULL,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32FCCmdSize,
+ pui8FCDMCmd,
+ NULL,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_RTU_FC,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "RS",
+ asRSCmdHelperData);
+ if (eError1 != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickRSKM_Exit;
+ }
+
+ eError1 = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asRSCmdHelperData),
+ asRSCmdHelperData);
+ if (eError1 != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickRSKM_Exit;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return with
+ retry back to the services client before we take any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet so only submit
+ the command if the create was successful
+ */
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ RGXCmdHelperReleaseCmdCCB(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+ asFCCmdHelperData, "FC", 0);
+
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ ui32RTUCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(IMG_ARR_NUM_ELEMS(asRSCmdHelperData),
+ asRSCmdHelperData, "RS",
+ FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr);
+
+ /*
+ * Construct the kernel RTU CCB command.
+ * (Safe to release reference to ray context virtual address because
+ * ray context destruction must flush the firmware).
+ */
+ sRSKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sRSKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRSData->psServerCommonContext);
+ sRSKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+ sRSKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_RTU,
+ sRSKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32RTUCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psRayContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_RS);
+
+ /*
+ * Submit the RTU command to the firmware.
+ */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_RTU,
+ &sRSKCCBCmd,
+ sizeof(sRSKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickRSKM failed to schedule kernel RTU command. Error:%u", eError));
+ eError = eError2;
+ goto PVRSRVRGXKickRSKM_Exit;
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_RS);
+#endif
+ }
+
+
+PVRSRVRGXKickRSKM_Exit:
+err_populate_sync_addr_list:
+ return eError;
+}
+
+/*
+ * PVRSRVRGXKickVRDMKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef)
+{
+ RGXFWIF_KCCB_CMD sSHKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA sCmdHelperData;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ RGX_SERVER_RAY_SH_DATA *psSHData = &psRayContext->sSHData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SHGCmdOffset = 0;
+ IMG_UINT32 ui32JobId;
+ IMG_UINT32 ui32FWCtx;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ ui32JobId = OSAtomicIncrement(&psRayContext->hJobId);
+
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on SH) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext),
+ ui32ClientFenceCount,
+ psRayContext->sSyncAddrListFence.pasFWAddrs,
+ paui32ClientFenceValue,
+ ui32ClientUpdateCount,
+ psRayContext->sSyncAddrListUpdate.pasFWAddrs,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_SHG,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "SH",
+ &sCmdHelperData);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickSHKM_Exit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(1, &sCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickSHKM_Exit;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return with
+ retry back to the services client before we take any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet so only submit
+ the command if the create was successful
+ */
+
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ ui32SHGCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1, &sCmdHelperData, "SH", FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr);
+
+
+ /*
+ * Construct the kernel SHG CCB command.
+ * (Safe to release reference to ray context virtual address because
+ * ray context destruction must flush the firmware).
+ */
+ sSHKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sSHKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psSHData->psServerCommonContext);
+ sSHKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+ sSHKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_SHG,
+ sSHKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32SHGCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psRayContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_VRDM);
+
+ /*
+ * Submit the SHG command to the firmware.
+ */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_SHG,
+ &sSHKCCBCmd,
+ sizeof(sSHKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickSHKM failed to schedule kernel RTU command. Error:%u", eError));
+ eError = eError2;
+ goto PVRSRVRGXKickSHKM_Exit;
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_VRDM);
+#endif
+ }
+
+
+PVRSRVRGXKickSHKM_Exit:
+err_populate_sync_addr_list:
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ if (psRayContext->sSHData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRayContext->sSHData.psServerCommonContext,
+ psConnection,
+ psRayContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_SHG);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the SH part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_shcontext;
+ }
+
+ psRayContext->sSHData.ui32Priority = ui32Priority;
+ }
+
+ if (psRayContext->sRSData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRayContext->sRSData.psServerCommonContext,
+ psConnection,
+ psRayContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_RTU);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the RS part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+ psRayContext->sRSData.ui32Priority = ui32Priority;
+ }
+ return PVRSRV_OK;
+
+fail_rscontext:
+fail_shcontext:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+ dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ DumpStalledFWCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+ if(NULL != psCurrentServerRayCtx->sSHData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext, RGX_KICK_TYPE_DM_RTU) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_RTU;
+ }
+ }
+
+ if(NULL != psCurrentServerRayCtx->sRSData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext, RGX_KICK_TYPE_DM_SHG) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_SHG;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxSHGRTU.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX ray tracing functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX ray tracing functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXRAY_H__)
+#define __RGXRAY_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxfwutils.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RAY_CONTEXT_ RGX_SERVER_RAY_CONTEXT;
+typedef struct _RGX_SERVER_RPM_CONTEXT_ RGX_SERVER_RPM_CONTEXT;
+typedef struct _RGX_RPM_FREELIST_ RGX_RPM_FREELIST;
+
+
+struct _RGX_SERVER_RPM_CONTEXT_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRPMContextMemDesc;
+ //DEVMEM_MEMDESC *psRTACtlMemDesc;
+ //DEVMEM_MEMDESC *psRTArrayMemDesc;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ IMG_UINT32 uiFLRefCount; /*!< increments each time a free list references this parent context */
+
+ DEVMEMINT_HEAP *psSceneHeap;
+ DEVMEMINT_HEAP *psRPMPageTableHeap;
+ DEVMEMINT_HEAP *psRPMFreeListHeap;
+
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr;
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr; /*!< Base address of the virtual heap where Doppler scene is mapped */
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr;
+
+ IMG_UINT32 ui32TotalRPMPages; /*!< Total virtual pages available */
+ IMG_UINT32 uiLog2DopplerPageSize; /*!< Doppler virtual page size, may be sub-4KB */
+ IMG_UINT32 ui32UnallocatedPages; /*!< Unmapped pages which may be mapped and added to a RPM free list */
+ IMG_UINT32 ui32RPMEntriesInPage; /*!< Number of remaining RPM page entries (dwords) in current mapped pages */
+
+ /* Sparse mappings */
+ PMR *psSceneHierarchyPMR; /*!< Scene hierarchy phys page resource */
+ PMR *psRPMPageTablePMR; /*!< RPM pages in use by scene hierarchy phys page resource */
+
+ /* Current page offset at the end of the physical allocation (PMR)
+ * for the scene memory and RPM page tables. This is where new phys pages
+ * will be mapped when the grow occurs (using sparse dev mem API). */
+ IMG_UINT32 ui32SceneMemorySparseMappingIndex;
+ IMG_UINT32 ui32RPMPageTableSparseMappingIndex;
+};
+
+/*
+ * RPM host freelist (analogous to PM host freelist)
+ */
+struct _RGX_RPM_FREELIST_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ CONNECTION_DATA *psConnection;
+ RGX_SERVER_RPM_CONTEXT *psParentCtx;
+
+ /* Free list PMR. Used for grow */
+ PMR *psFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset;
+
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+
+ /* Current page offset at the end of the physical allocation (PMR)
+ * for the scene memory and RPM page tables. This is where new phys pages
+ * will be mapped when the grow occurs (using sparse dev mem API). */
+ IMG_UINT32 ui32RPMFreeListSparseMappingIndex;
+
+ IMG_UINT32 ui32ReadOffset; /*!< FPL circular buffer read offset */
+ IMG_UINT32 ui32WriteOffset; /*!< FPL circular buffer write offset */
+
+ /* Freelist config */
+ IMG_UINT32 ui32MaxFLPages;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32CurrentFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_UINT32 ui32FreelistID;
+ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */
+ IMG_BOOL bCheckFreelist; /* freelist check enabled */
+ IMG_UINT32 ui32RefCount; /* freelist reference counting */
+ IMG_UINT32 uiLog2DopplerPageSize; /*!< Doppler virtual page size, may be sub-4KB */
+ IMG_UINT32 ui32EntriesInPage; /*!< Number of remaining FPL page entries (dwords) in current mapped pages */
+
+ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application*/
+ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */
+ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */
+
+ IMG_PID ownerPid; /* Pid of the owner of the list */
+
+ /*
+ * External freelists don't use common RPM memory and are not added to global list of freelists.
+ * They're created and destroyed on demand, e.g. when loading offline hierarchies.
+ */
+ IMG_BOOL bIsExternal; /* Mark if the freelist is external */
+
+ /* Memory Blocks */
+ DLLIST_NODE sMemoryBlockHead; /* head of list of RGX_RPM_DEVMEM_DESC block descriptors */
+ DLLIST_NODE sNode; /* node used to reference list of freelists on device */
+
+ /* FW data structures */
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+};
+
+
+/*!
+ * RGXCreateRPMFreeList
+ *
+ * @param psConnection
+ * @param psDeviceNode
+ * @param psRPMContext
+ * @param ui32InitFLPages
+ * @param ui32GrowFLPages
+ * @param sFreeListDevVAddr
+ * @param ppsFreeList
+ * @param puiHWFreeList
+ * @param bIsExternal
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT *psRPMContext,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ RGX_RPM_FREELIST **ppsFreeList,
+ IMG_UINT32 *puiHWFreeList,
+ IMG_BOOL bIsExternal);
+
+/*!
+ * RGXGrowRPMFreeList
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32RequestNumPages,
+ PDLLIST_NODE pListHeader);
+
+/*!
+ * RGXDestroyRPMFreeList
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList);
+
+/*!
+ * RGXCreateRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT **ppsRPMContext,
+ IMG_UINT32 ui32TotalRPMPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr,
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr,
+ DEVMEMINT_HEAP *psSceneHeap,
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr,
+ DEVMEMINT_HEAP *psRPMPageTableHeap,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWFrameData);
+
+/*!
+ * RGXDestroyRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData);
+
+/*!
+ RGXProcessRequestRPMGrow
+*/
+IMG_EXPORT
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID);
+
+
+/*!
+ RGXAddBlockToRPMFreeListKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages);
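+
+/*
+ * Typical call order for the RPM API above (a sketch only, not normative;
+ * error handling omitted):
+ *
+ *	1. RGXCreateRPMContext()        - create the shared RPM context and the
+ *	                                  sparse PMRs for scene memory and the
+ *	                                  RPM page table
+ *	2. RGXCreateRPMFreeList()       - create one or more freelists referencing
+ *	                                  the context (uiFLRefCount is incremented)
+ *	3. RGXAddBlockToRPMFreeListKM() / RGXProcessRequestRPMGrow()
+ *	                                - optional host- or FW-driven grow requests
+ *	4. RGXDestroyRPMFreeList()      - may return PVRSRV_ERROR_RETRY while busy
+ *	5. RGXDestroyRPMContext()       - returns PVRSRV_ERROR_NONZERO_REFCOUNT
+ *	                                  until all freelists have been destroyed
+ */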
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXCreateRayContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRayContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sMCUFenceAddr - MCU Fence device virtual address
+ @Input sVRMCallStackAddr - VRM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsRayContext - server-side ray context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RAY_CONTEXT **ppsRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyRayContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyRayContext
+
+ @Input psRayContext - Ray context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickRSKM
+
+ @Description
+ Server-side implementation of RGXKickRS
+
+ @Input psRayContext - server-side ray context
+ @Input pui8DMCmd - RTU DM command for the client CCB
+ @Input ui32PDumpFlags - PDump flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32FCCmdSize,
+ IMG_PBYTE pui8FCDMCmd,
+ IMG_UINT32 ui32FrameContextID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef);
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickVRDMKM
+
+ @Description
+ Server-side implementation of RGXKickVRDM
+
+ @Input psRayContext - server-side ray context
+ @Input pui8DMCmd - SHG DM command for the client CCB
+ @Input ui32PDumpFlags - PDump flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef);
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if ray context is waiting on a fence */
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client ray contexts are stalled */
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
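+
+/*
+ * Typical lifecycle (illustrative sketch only, not built; the sync, CCB and
+ * framework parameters are elided and the local names are hypothetical):
+ *
+ * RGX_SERVER_RAY_CONTEXT *psRayContext;
+ *
+ * PVRSRVRGXCreateRayContextKM(psConnection, psDeviceNode, ui32Priority,
+ *                             sMCUFenceAddr, sVRMCallStackAddr,
+ *                             ui32FrameworkCommandSize,
+ *                             pabyFrameworkCommand, hMemCtxPrivData,
+ *                             &psRayContext);
+ *
+ * // Kick SHG (VRDM) and RTU (RS) work on the context as required
+ * PVRSRVRGXKickVRDMKM(psRayContext, ...);
+ * PVRSRVRGXKickRSKM(psRayContext, ...);
+ *
+ * PVRSRVRGXDestroyRayContextKM(psRayContext);
+ */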
+
+#endif /* __RGXRAY_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Register configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Regconfig routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT8 ui8RegCfgType)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+ RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType;
+
+ PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+ if (eRegCfgType < psRegCfg->eRegCfgTypeToPush)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXSetRegConfigTypeKM: Register configuration requested (%d) is not valid since it has to be at least %d."
+ " Configurations of different types need to go in order",
+ eRegCfgType,
+ psRegCfg->eRegCfgTypeToPush));
+ return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE;
+ }
+
+ psRegCfg->eRegCfgTypeToPush = eRegCfgType;
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (psRegCfg->bEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active."));
+ return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+ }
+ if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full."));
+ return PVRSRV_ERROR_REG_CONFIG_FULL;
+ }
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask;
+ sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ psRegCfg->ui32NumRegRecords++;
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (psRegCfg->bEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active."));
+ return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+ }
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ psRegCfg->ui32NumRegRecords = 0;
+ psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON;
+
+ return eError;
+#else
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ psRegCfg->bEnabled = IMG_TRUE;
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+ return eError;
+ }
+
+ psRegCfg->bEnabled = IMG_FALSE;
+
+ return eError;
+#else
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX register configuration functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX register configuration functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXREGCONFIG_H__)
+#define __RGXREGCONFIG_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXSetRegConfigTypeKM
+
+ @Description
+ Server-side implementation of RGXSetRegConfigType
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegCfgType - Register configuration type to push
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT8 ui8RegCfgType);
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXAddRegConfigKM
+
+ @Description
+ Server-side implementation of RGXAddRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32RegAddr - Register address
+ @Input ui64RegValue - Reg value
+ @Input ui64RegMask - Reg mask
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXClearRegConfigKM
+
+ @Description
+ Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXEnableRegConfigKM
+
+ @Description
+ Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDisableRegConfigKM
+
+ @Description
+ Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
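+/*
+ * Typical call sequence (illustrative sketch only, not built; requires
+ * SUPPORT_USER_REGISTER_CONFIGURATION and omits error handling):
+ *
+ * // Records can only be added while the configuration is disabled, and
+ * // configuration types must be pushed in non-decreasing order.
+ * PVRSRVRGXSetRegConfigTypeKM(psConnection, psDeviceNode, ui8RegCfgType);
+ * PVRSRVRGXAddRegConfigKM(psConnection, psDeviceNode,
+ *                         ui32RegAddr, ui64RegValue, ui64RegMask);
+ * PVRSRVRGXEnableRegConfigKM(psConnection, psDeviceNode);
+ * // ...
+ * PVRSRVRGXDisableRegConfigKM(psConnection, psDeviceNode);
+ * PVRSRVRGXClearRegConfigKM(psConnection, psDeviceNode);
+ */
+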
+#endif /* __RGXREGCONFIG_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX kernel services structures/functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX initialisation script definitions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXSCRIPT_H__
+#define __RGXSCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define RGX_MAX_DEBUG_COMMANDS (320)
+#define RGX_DBG_CMD_NAME_SIZE (40)
+
+typedef enum _RGX_INIT_OPERATION
+{
+ RGX_INIT_OP_ILLEGAL = 0,
+ RGX_INIT_OP_WRITE_HW_REG,
+ RGX_INIT_OP_POLL_64_HW_REG,
+ RGX_INIT_OP_POLL_HW_REG,
+ RGX_INIT_OP_COND_POLL_HW_REG,
+ RGX_INIT_OP_LOOP_POINT,
+ RGX_INIT_OP_COND_BRANCH,
+ RGX_INIT_OP_HALT,
+ RGX_INIT_OP_DBG_READ32_HW_REG,
+ RGX_INIT_OP_DBG_READ64_HW_REG,
+ RGX_INIT_OP_DBG_CALC,
+ RGX_INIT_OP_DBG_WAIT,
+ RGX_INIT_OP_DBG_STRING,
+ RGX_INIT_OP_PDUMP_HW_REG,
+} RGX_INIT_OPERATION;
+
+typedef union _RGX_INIT_COMMAND_
+{
+ RGX_INIT_OPERATION eOp;
+
+ struct {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ } sWriteHWReg;
+
+ struct {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ } sPDumpHWReg;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT64 ui64Value;
+ IMG_UINT64 ui64Mask;
+ } sPoll64HWReg;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ } sPollHWReg;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32CondOffset;
+ IMG_UINT32 ui32CondValue;
+ IMG_UINT32 ui32CondMask;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ } sCondPollHWReg;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ } sLoopPoint;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+
+ } sConditionalBranchPoint;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+ } sDBGReadHWReg;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset1;
+ IMG_UINT32 ui32Offset2;
+ IMG_UINT32 ui32Offset3;
+ IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+ } sDBGCalc;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32WaitInUs;
+ } sDBGWait;
+
+ struct
+ {
+ RGX_INIT_OPERATION eOp;
+ IMG_CHAR aszString[RGX_DBG_CMD_NAME_SIZE];
+ } sDBGString;
+
+} RGX_INIT_COMMAND;
+
+typedef struct _RGX_INIT_SCRIPTS_
+{
+ RGX_INIT_COMMAND asDbgCommands[RGX_MAX_DEBUG_COMMANDS];
+} RGX_SCRIPTS;
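+
+/*
+ * Example (illustrative sketch only, not built): a debug script is simply an
+ * array of RGX_INIT_COMMAND entries, e.g. a 32-bit register read followed by
+ * a halt. The register offset used below is a placeholder.
+ *
+ * static const RGX_INIT_COMMAND asExampleDbgScript[] = {
+ *     { .sDBGReadHWReg = { RGX_INIT_OP_DBG_READ32_HW_REG,
+ *                          0x0020, "CORE_REVISION " } },
+ *     { .eOp = RGX_INIT_OP_HALT },
+ * };
+ */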
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __RGXSCRIPT_H__ */
+
+/*****************************************************************************
+ End of file (rgxscript.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxsignals.c
+@Title RGX Signals routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Signals routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsignals.h"
+
+#include "rgxmem.h"
+#include "rgx_fwif_km.h"
+#include "mmu_common.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sDevSignalAddress)
+{
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE;
+ sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress;
+ RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifySignalUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+ return eError;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxsignals.h
+@Title RGX Signals routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Signals routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_SIGNALS_H)
+#define _RGX_SIGNALS_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "device.h"
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXNotifySignalUpdateKM
+
+ @Description Server-side implementation of RGXNotifySignalUpdate
+
+ @Input hMemCtxPrivData - memory context private data
+ @Input sDevSignalAddress - device virtual address of the updated signal
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sDevSignalAddress);
+
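+/*
+ * Example usage (illustrative sketch only, not built; the memory context
+ * handle and signal address come from the caller and are hypothetical here):
+ *
+ * eError = PVRSRVRGXNotifySignalUpdateKM(psConnection, psDeviceNode,
+ *                                        hMemCtxPrivData,
+ *                                        sDevSignalAddress);
+ *
+ * The call schedules an RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE command to the
+ * firmware GP data master and retries while the kernel CCB is full.
+ */
+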
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "client_rgxinit_bridge.h"
+
+#include "rgx_fwif_sig.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "srvinit_osfunc.h"
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+#include "rgxdefs.h"
+#else
+#include "rgxdefs_km.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "srvinit_pdump.h"
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+#include "rgxsrvinit_script.h"
+
+#include "rgxfwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+
+#include "rgx_hwperf_km.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+#include "rgx_hwperf.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif_client.h"
+#include "rgx_fwif_alignchecks.h"
+#else
+#include "rgxdevice.h"
+#endif
+static RGX_INIT_COMMAND asDbgCommands[RGX_MAX_DEBUG_COMMANDS];
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+#error "SUPPORT_KERNEL_SRVINIT is required by SUPPORT_TRUSTED_DEVICE!"
+#endif
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+
+#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */
+
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(LINUX)
+#include "km_apphint.h"
+#include "os_srvinit_param.h"
+#else
+#include "srvinit_param.h"
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+ HTB_LOG_SFGROUPLIST
+#undef X
+};
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+ { "droplatest", HTB_OPMODE_DROPLATEST},
+ { "dropoldest", HTB_OPMODE_DROPOLDEST},
+ /* HTB should never be started in HTB_OPMODE_BLOCK
+ * as this can lead to deadlocks
+ */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+ { "trace", 2},
+ { "tbi", 1},
+ { "none", 0}
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+ { "mono", 0 },
+ { "mono_raw", 1 },
+ { "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b( a, d, e )
+APPHINT_LIST_ALL
+#undef X
+#endif /* SUPPORT_KERNEL_SRVINIT && LINUX */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+ IMG_BOOL bDustRequestInject;
+ IMG_BOOL bEnableSignatureChecks;
+ IMG_UINT32 ui32SignatureChecksBufSize;
+
+#if defined(DEBUG)
+ IMG_BOOL bAssertOnOutOfMem;
+ IMG_BOOL bAssertOnHWRTrigger;
+#endif
+ IMG_BOOL bCheckMlist;
+ IMG_BOOL bDisableClockGating;
+ IMG_BOOL bDisableDMOverlap;
+ IMG_BOOL bDisableFEDLogging;
+ IMG_BOOL bDisablePDP;
+ IMG_BOOL bEnableCDMKillRand;
+ IMG_BOOL bEnableFTrace;
+ IMG_BOOL bEnableHWPerf;
+ IMG_BOOL bEnableHWPerfHost;
+ IMG_BOOL bEnableHWR;
+ IMG_BOOL bEnableRTUBypass;
+ IMG_BOOL bFilteringMode;
+ IMG_BOOL bHWPerfDisableCustomCounterFilter;
+ IMG_BOOL bZeroFreelist;
+ IMG_UINT32 ui32EnableFWContextSwitch;
+ IMG_UINT32 ui32FWContextSwitchProfile;
+ IMG_UINT32 ui32HWPerfFWBufSize;
+ IMG_UINT32 ui32HWPerfHostBufSize;
+ IMG_UINT32 ui32HWPerfFilter0;
+ IMG_UINT32 ui32HWPerfFilter1;
+ IMG_UINT32 ui32HWPerfHostFilter;
+ IMG_UINT32 ui32TimeCorrClock;
+ IMG_UINT32 ui32HWRDebugDumpLimit;
+ IMG_UINT32 ui32JonesDisableMask;
+ IMG_UINT32 ui32LogType;
+ IMG_UINT32 ui32TruncateMode;
+ FW_PERF_CONF eFirmwarePerf;
+ RGX_ACTIVEPM_CONF eRGXActivePMConf;
+ RGX_META_T1_CONF eUseMETAT1;
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+#endif
+ IMG_BOOL bEnableTrustedDeviceAceConfig;
+} RGX_SRVINIT_APPHINTS;
+
+
+/*!
+*******************************************************************************
+
+ @Function GetApphints
+
+ @Description Read init time apphints and initialise internal variables
+
+ @Input psHints : Pointer to apphints container
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetApphints(RGX_SRVINIT_APPHINTS *psHints, IMG_UINT64 ui64ErnsBrns, IMG_UINT64 ui64Features)
+{
+ void *pvParamState = SrvInitParamOpen();
+ IMG_UINT32 ui32ParamTemp;
+ IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE, \
+ bE41805 = IMG_FALSE, bE42606 = IMG_FALSE, bAXIACELite = IMG_FALSE;
+
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_UNREFERENCED_PARAMETER(bE41805);
+ PVR_UNREFERENCED_PARAMETER(bE42606);
+ PVR_UNREFERENCED_PARAMETER(bE42290);
+ PVR_UNREFERENCED_PARAMETER(bS7TopInfra);
+ PVR_UNREFERENCED_PARAMETER(bTPUFiltermodeCtrl);
+#endif
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ PVR_UNREFERENCED_PARAMETER(ui64ErnsBrns);
+ PVR_UNREFERENCED_PARAMETER(ui64Features);
+ PVR_UNREFERENCED_PARAMETER(bAXIACELite);
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ bS7TopInfra = IMG_TRUE;
+#endif
+#if defined(HW_ERN_42290)
+ bE42290 = IMG_TRUE;
+#endif
+#if defined(HW_ERN_41805)
+ bE41805 = IMG_TRUE;
+#endif
+#if defined(HW_ERN_42606)
+ bE42606 = IMG_TRUE;
+#endif
+#if defined(RGX_FEATURE_AXI_ACELITE)
+ bAXIACELite = IMG_TRUE;
+#endif
+#else
+ if(ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ bS7TopInfra = IMG_TRUE;
+ }
+
+ if(ui64Features & RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK)
+ {
+ bTPUFiltermodeCtrl = IMG_TRUE;
+ }
+
+ if(ui64ErnsBrns & HW_ERN_42290_BIT_MASK)
+ {
+ bE42290 = IMG_TRUE;
+ }
+
+ if(ui64ErnsBrns & HW_ERN_41805_BIT_MASK)
+ {
+ bE41805 = IMG_TRUE;
+ }
+
+ if(ui64ErnsBrns & HW_ERN_42606_BIT_MASK)
+ {
+ bE42606 = IMG_TRUE;
+ }
+
+ if(ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+ {
+ bAXIACELite = IMG_TRUE;
+ }
+#endif
+ /*
+ * KM AppHints not passed through the srvinit interface
+ */
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ SrvInitParamUnreferenced(FWPoisonOnFreeValue);
+ SrvInitParamUnreferenced(EnableFWPoisonOnFree);
+ SrvInitParamUnreferenced(GeneralNon4KHeapPageSize);
+ SrvInitParamUnreferenced(WatchdogThreadWeight);
+ SrvInitParamUnreferenced(WatchdogThreadPriority);
+ SrvInitParamUnreferenced(CleanupThreadWeight);
+ SrvInitParamUnreferenced(CleanupThreadPriority);
+ SrvInitParamUnreferenced(RGXBVNC);
+#endif
+
+ /*
+ * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+ */
+
+ SrvInitParamGetBOOL(pvParamState, DustRequestInject, psHints->bDustRequestInject);
+ SrvInitParamGetBOOL(pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks);
+ SrvInitParamGetUINT32(pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+#if defined(DEBUG)
+ SrvInitParamGetBOOL(pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem);
+ SrvInitParamGetBOOL(pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger);
+#endif
+ SrvInitParamGetBOOL(pvParamState, CheckMList, psHints->bCheckMlist);
+ SrvInitParamGetBOOL(pvParamState, DisableClockGating, psHints->bDisableClockGating);
+ SrvInitParamGetBOOL(pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap);
+ SrvInitParamGetBOOL(pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging);
+ SrvInitParamGetUINT32(pvParamState, EnableAPM, ui32ParamTemp);
+ psHints->eRGXActivePMConf = ui32ParamTemp;
+ SrvInitParamGetBOOL(pvParamState, EnableCDMKillingRandMode, psHints->bEnableCDMKillRand);
+ SrvInitParamGetBOOL(pvParamState, EnableFTraceGPU, psHints->bEnableFTrace);
+ SrvInitParamGetUINT32(pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch);
+ SrvInitParamGetBOOL(pvParamState, EnableHWPerf, psHints->bEnableHWPerf);
+ SrvInitParamGetBOOL(pvParamState, EnableHWPerfHost, psHints->bEnableHWPerfHost);
+ SrvInitParamGetBOOL(pvParamState, EnableHWR, psHints->bEnableHWR);
+ SrvInitParamGetUINT32(pvParamState, EnableRDPowerIsland, ui32ParamTemp);
+ psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+ SrvInitParamGetBOOL(pvParamState, EnableRTUBypass, psHints->bEnableRTUBypass);
+ SrvInitParamGetUINT32(pvParamState, FirmwarePerf, ui32ParamTemp);
+ psHints->eFirmwarePerf = ui32ParamTemp;
+ SrvInitParamGetUINT32(pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+ SrvInitParamGetBOOL(pvParamState, HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter);
+ SrvInitParamGetUINT32(pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize);
+ SrvInitParamGetUINT32(pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize);
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(LINUX)
+ /* These apphints use different names on the kernel srvinit / Linux path */
+ {
+ IMG_UINT64 ui64Tmp;
+ SrvInitParamGetBOOL(pvParamState, DisablePDumpPanic, psHints->bDisablePDP);
+ SrvInitParamGetUINT64(pvParamState, HWPerfFWFilter, ui64Tmp);
+ psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+ psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+ }
+#else
+ SrvInitParamGetBOOL(pvParamState, DisablePDP, psHints->bDisablePDP);
+ SrvInitParamGetUINT32(pvParamState, HWPerfFilter0, psHints->ui32HWPerfFilter0);
+ SrvInitParamGetUINT32(pvParamState, HWPerfFilter1, psHints->ui32HWPerfFilter1);
+ SrvInitParamUnreferenced(DisablePDumpPanic);
+ SrvInitParamUnreferenced(HWPerfFWFilter);
+ SrvInitParamUnreferenced(RGXBVNC);
+#endif
+ SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter);
+ SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock);
+ SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp);
+ psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+
+ if(bS7TopInfra)
+ {
+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU)
+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U)
+ #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U)
+
+ SrvInitParamGetUINT32(pvParamState, JonesDisableMask, ui32ParamTemp);
+ if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) ||
+ ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN))
+ {
+ ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN |
+ RGX_CR_JONES_FIX_MT_ORDER_ISP_EN);
+ PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d",
+ ui32ParamTemp));
+ }
+ psHints->ui32JonesDisableMask = ui32ParamTemp;
+ }
+
+ if ( (bE42290) && (bTPUFiltermodeCtrl))
+ {
+ SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode);
+ }
+
+ if(bE41805 || bE42606)
+ {
+ SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode);
+ }
+#if defined(EMULATOR)
+ if(bAXIACELite)
+ {
+ SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig);
+ }
+#else
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ SrvInitParamUnreferenced(EnableTrustedDeviceAceConfig);
+#endif
+#endif
+
+ SrvInitParamGetUINT32(pvParamState, UseMETAT1, ui32ParamTemp);
+ psHints->eUseMETAT1 = ui32ParamTemp & RGXFWIF_INICFG_METAT1_MASK;
+
+ SrvInitParamGetBOOL(pvParamState, ZeroFreelist, psHints->bZeroFreelist);
+
+
+ /*
+ * HWPerf filter apphints setup
+ */
+ if (psHints->bEnableHWPerf)
+ {
+ if (psHints->ui32HWPerfFilter0 == 0 && psHints->ui32HWPerfFilter1 == 0)
+ {
+ psHints->ui32HWPerfFilter0 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+ psHints->ui32HWPerfFilter1 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+ }
+ }
+ else
+ {
+ if (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0)
+ {
+ psHints->bEnableHWPerf = IMG_TRUE;
+ }
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ if (psHints->bEnableFTrace)
+ {
+ /* If the EnableHWPerf AppHint has not been set, only request the
+ * events needed for FTrace, i.e. the Kick/Finish HW events */
+ if (!psHints->bEnableHWPerf)
+ {
+ psHints->ui32HWPerfFilter0 = (IMG_UINT32) (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH & 0xFFFFFFFF);
+ psHints->ui32HWPerfFilter1 = (IMG_UINT32) ((RGX_HWPERF_EVENT_MASK_HW_KICKFINISH & 0xFFFFFFFF00000000) >> 32);
+ }
+ else
+ {
+ psHints->ui32HWPerfFilter0 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+ psHints->ui32HWPerfFilter1 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+ }
+
+ }
+#endif
+
+ if (psHints->bEnableHWPerfHost)
+ {
+ if (psHints->ui32HWPerfHostFilter == 0)
+ {
+ psHints->ui32HWPerfHostFilter = HW_PERF_FILTER_DEFAULT_ALL_ON;
+ }
+ }
+ else
+ {
+ if (psHints->ui32HWPerfHostFilter != 0)
+ {
+ psHints->bEnableHWPerfHost = IMG_TRUE;
+ }
+ }
+
+ /*
+ * FW logs apphints
+ */
+ {
+ IMG_UINT32 ui32LogType;
+ IMG_BOOL bFirmwareLogTypeConfigured, bAnyLogGroupConfigured;
+
+ SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogType);
+ bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+ bFirmwareLogTypeConfigured = SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32ParamTemp);
+
+ if (bFirmwareLogTypeConfigured)
+ {
+ if (ui32ParamTemp == 2 /* TRACE */)
+ {
+ if (!bAnyLogGroupConfigured)
+ {
+ /* No groups configured - defaulting to MAIN group */
+ ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ ui32LogType |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+ else if (ui32ParamTemp == 1 /* TBI */)
+ {
+ if (!bAnyLogGroupConfigured)
+ {
+ /* No groups configured - defaulting to MAIN group */
+ ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ ui32LogType &= ~RGXFWIF_LOG_TYPE_TRACE;
+ }
+ else if (ui32ParamTemp == 0 /* NONE */)
+ {
+ ui32LogType = RGXFWIF_LOG_TYPE_NONE;
+ }
+ }
+ else
+ {
+ /* No log type configured - defaulting to TRACE */
+ ui32LogType |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+
+ psHints->ui32LogType = ui32LogType;
+ }
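+
+ /*
+ * Worked example (illustrative): with FirmwareLogType set to "trace" and
+ * no EnableLogGroup groups configured, the code above yields
+ * ui32LogType = RGXFWIF_LOG_TYPE_GROUP_MAIN | RGXFWIF_LOG_TYPE_TRACE;
+ * with FirmwareLogType "none", ui32LogType = RGXFWIF_LOG_TYPE_NONE
+ * regardless of the configured groups.
+ */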
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ /*
+ * GPU virtualisation validation apphints
+ */
+ {
+ IMG_UINT uiCounter, uiRegion;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"\n[GPU Virtualization Validation]: Reading OSid limits\n"));
+
+ for (uiRegion = 0; uiRegion < GPUVIRT_VALIDATION_NUM_REGIONS; uiRegion++)
+ {
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ IMG_CHAR pszHintString[GPUVIRT_VALIDATION_MAX_STRING_LENGTH];
+ IMG_UINT32 ui32Default = 0;
+
+ snprintf(pszHintString, GPUVIRT_VALIDATION_MAX_STRING_LENGTH, "OSidRegion%dMin%d", uiRegion, uiCounter);
+ PVRSRVGetAppHint(pvParamState,
+ pszHintString,
+ IMG_UINT_TYPE,
+ &ui32Default,
+ &(psHints->aui32OSidMin[uiCounter][uiRegion]));
+
+ snprintf(pszHintString, GPUVIRT_VALIDATION_MAX_STRING_LENGTH, "OSidRegion%dMax%d", uiRegion, uiCounter);
+ PVRSRVGetAppHint(pvParamState,
+ pszHintString,
+ IMG_UINT_TYPE,
+ &ui32Default,
+ &(psHints->aui32OSidMax[uiCounter][uiRegion]));
+ }
+ }
+
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ for (uiRegion = 0; uiRegion < GPUVIRT_VALIDATION_NUM_REGIONS; uiRegion++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "\n[GPU Virtualization Validation]: Region:%d, OSid:%d, Min:%u, Max:%u\n",
+ uiRegion, uiCounter,
+ psHints->aui32OSidMin[uiCounter][uiRegion],
+ psHints->aui32OSidMax[uiCounter][uiRegion]));
+ }
+ }
+ }
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+
+ SrvInitParamClose(pvParamState);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function GetFWConfigFlags
+
+ @Description Initialise and return FW config flags
+
+ @Input psHints : Apphints container
+ @Input pui32FWConfigFlags : Pointer to config flags
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(RGX_SRVINIT_APPHINTS *psHints,
+ IMG_UINT32 *pui32FWConfigFlags)
+{
+ IMG_UINT32 ui32FWConfigFlags = 0;
+
+#if defined(DEBUG)
+ ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+ ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+#endif
+ ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+ ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+ ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+ ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_SRVCFG_DISABLE_PDP_EN : 0;
+ ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN : 0;
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ /* FTrace GPU events depend on HWPerf, so ensure HWPerf is enabled here */
+ ui32FWConfigFlags |= psHints->bEnableFTrace ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#endif
+ ui32FWConfigFlags |= psHints->bEnableHWPerf ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#if !defined(NO_HARDWARE)
+ ui32FWConfigFlags |= psHints->bEnableHWR ? RGXFWIF_INICFG_HWR_EN : 0;
+#endif
+ ui32FWConfigFlags |= psHints->bEnableRTUBypass ? RGXFWIF_INICFG_RTU_BYPASS_EN : 0;
+ ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+ ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0;
+ ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0;
+ ui32FWConfigFlags |= psHints->eUseMETAT1 << RGXFWIF_INICFG_METAT1_SHIFT;
+ ui32FWConfigFlags |= psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_CTXSWITCH_CLRMSK;
+ ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+ *pui32FWConfigFlags = ui32FWConfigFlags;
+}
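+
+/*
+ * Worked example (illustrative): on a hardware build (!NO_HARDWARE) with only
+ * EnableHWPerf and EnableHWR set amongst the boolean apphints, the flags
+ * returned are RGXFWIF_INICFG_HWPERF_EN | RGXFWIF_INICFG_HWR_EN, combined
+ * with whatever the METAT1 and context-switch apphints contribute.
+ */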
+
+
+/*!
+*******************************************************************************
+
+ @Function GetFilterFlags
+
+ @Description Initialise and return filter flags
+
+ @Input psHints : Apphints container
+
+ @Return Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+ IMG_UINT32 ui32FilterFlags = 0;
+
+ ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+ if (psHints->ui32TruncateMode == 2)
+ {
+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+ }
+ else if (psHints->ui32TruncateMode == 3)
+ {
+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+ }
+
+ return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function GetDeviceFlags
+
+ @Description Initialise and return device flags
+
+ @Input psHints : Apphints container
+ @Input pui32DeviceFlags : Pointer to device flags
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+ IMG_UINT32 *pui32DeviceFlags)
+{
+ IMG_UINT32 ui32DeviceFlags = 0;
+
+ ui32DeviceFlags |= psHints->bDustRequestInject? RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN : 0;
+
+ ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKMIF_DEVICE_STATE_ZERO_FREELIST : 0;
+ ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+ ui32DeviceFlags |= psHints->bEnableHWPerfHost ? RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN : 0;
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ ui32DeviceFlags |= psHints->bEnableFTrace ? RGXKMIF_DEVICE_STATE_FTRACE_EN : 0;
+#endif
+
+ *pui32DeviceFlags = ui32DeviceFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function PrepareDebugScript
+
+ @Description Generates a script to dump debug info
+
+ @Input psDbgInitScript
+ @Input bFirmwarePerf
+ @Input pvDeviceInfo
+
+ @Return IMG_BOOL IMG_FALSE if it runs out of cmds when building the script, IMG_TRUE otherwise
+
+******************************************************************************/
+static IMG_BOOL PrepareDebugScript(RGX_SCRIPT_BUILD* psDbgInitScript,
+ IMG_BOOL bFirmwarePerf,
+ void *pvDeviceInfo)
+{
+#define DBG_READ(T, R, S) if (!ScriptDBGReadRGXReg(psDbgInitScript, T, R, S)) return IMG_FALSE;
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+#define DBG_MSP_READ(R, S) if (!ScriptDBGReadMetaRegThroughSP(psDbgInitScript, R, S)) return IMG_FALSE;
+#define DBG_MCR_READ(R, S) if (!ScriptDBGReadMetaCoreReg(psDbgInitScript, R, S)) return IMG_FALSE;
+#else
+#define DBG_MSP_READ(R, S)
+#define DBG_MCR_READ(R, S)
+#endif
+#define DBG_CALC(R, S, T, U, V) if (!ScriptDBGCalc(psDbgInitScript, R, S, T, U, V)) return IMG_FALSE;
+#define DBG_STRING(S) if (!ScriptDBGString(psDbgInitScript, S)) return IMG_FALSE;
+#define DBG_READ32(R, S) DBG_READ(RGX_INIT_OP_DBG_READ32_HW_REG, R, S)
+#define DBG_READ64(R, S) DBG_READ(RGX_INIT_OP_DBG_READ64_HW_REG, R, S)
+#define DBG_CALC_TA_AND_3D(R, S, T, U) DBG_CALC(RGX_INIT_OP_DBG_CALC, R, S, T, U)
+ IMG_BOOL bS7Infra, bXTInfra, e44871, bRayTracing, e47025, bVIVTSlc, bMIPS, bPBVNC;
+ IMG_UINT32 ui32SLCBanks = 0, ui32Meta = 0;
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ PVR_UNREFERENCED_PARAMETER(pvDeviceInfo);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDeviceInfo;
+#endif
+ PVR_UNREFERENCED_PARAMETER(bFirmwarePerf);
+ bS7Infra = bXTInfra = e44871 = bRayTracing = e47025 = bVIVTSlc = bMIPS = bPBVNC = IMG_FALSE;
+
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ #if defined(RGX_FEATURE_META)
+ ui32Meta = RGX_FEATURE_META;
+ #endif
+ #if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ bS7Infra = IMG_TRUE;
+ #endif
+
+ #if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+ bXTInfra = IMG_TRUE;
+ #endif
+
+ #if defined(FIX_HW_BRN_44871)
+ e44871 = IMG_TRUE;
+ #endif
+
+ #if defined(HW_ERN_47025)
+ e47025 = IMG_TRUE;
+ #endif
+
+ #if defined(RGX_FEATURE_RAY_TRACING)
+ bRayTracing = IMG_TRUE;
+ #endif
+
+ #if defined(RGX_FEATURE_SLC_BANKS)
+ ui32SLCBanks = RGX_FEATURE_SLC_BANKS;
+ #endif
+
+ #if defined(RGX_FEATURE_SLC_VIVT)
+ bVIVTSlc = IMG_TRUE;
+ #endif
+
+ #if defined(RGX_FEATURE_MIPS)
+ bMIPS = IMG_TRUE;
+ #endif
+ #if defined(RGX_FEATURE_PBVNC_COREID_REG)
+ bPBVNC = IMG_TRUE;
+ #endif
+#else
+ do{
+ if(NULL == psDevInfo)
+ break;
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK)
+ {
+ bPBVNC = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui32META)
+ {
+ ui32Meta = psDevInfo->sDevFeatureCfg.ui32META;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ bS7Infra = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)
+ {
+ bXTInfra = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ bRayTracing = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+ {
+ bVIVTSlc = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ bMIPS = IMG_TRUE;
+ }
+
+
+ if(psDevInfo->sDevFeatureCfg.ui32SLCBanks)
+ {
+ ui32SLCBanks = psDevInfo->sDevFeatureCfg.ui32SLCBanks;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_44871_BIT_MASK)
+ {
+ e44871 = IMG_TRUE;
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_47025_BIT_MASK)
+ {
+ e47025 = IMG_TRUE;
+ }
+
+ }while(0);
+#endif
+
+ if(bPBVNC)
+ {
+ DBG_READ64(RGX_CR_CORE_ID, "CORE_ID ");
+ }else
+ {
+ DBG_READ32(RGX_CR_CORE_ID, "CORE_ID ");
+ }
+
+ DBG_READ32(RGX_CR_CORE_REVISION, "CORE_REVISION ");
+ DBG_READ32(RGX_CR_DESIGNER_REV_FIELD1, "DESIGNER_REV_FIELD1 ");
+ DBG_READ32(RGX_CR_DESIGNER_REV_FIELD2, "DESIGNER_REV_FIELD2 ");
+ DBG_READ64(RGX_CR_CHANGESET_NUMBER, "CHANGESET_NUMBER ");
+ if(ui32Meta)
+ {
+ DBG_READ32(RGX_CR_META_SP_MSLVIRQSTATUS, "META_SP_MSLVIRQSTATUS ");
+ }
+ DBG_READ64(RGX_CR_CLK_CTRL, "CLK_CTRL ");
+ DBG_READ64(RGX_CR_CLK_STATUS, "CLK_STATUS ");
+ DBG_READ64(RGX_CR_CLK_CTRL2, "CLK_CTRL2 ");
+ DBG_READ64(RGX_CR_CLK_STATUS2, "CLK_STATUS2 ");
+ if (bS7Infra)
+ {
+ DBG_READ64(RGX_CR_CLK_XTPLUS_CTRL, "CLK_XTPLUS_CTRL ");
+ DBG_READ64(RGX_CR_CLK_XTPLUS_STATUS, "CLK_XTPLUS_STATUS ");
+ }
+ DBG_READ32(RGX_CR_EVENT_STATUS, "EVENT_STATUS ");
+ DBG_READ64(RGX_CR_TIMER, "TIMER ");
+ if (bS7Infra)
+ {
+ DBG_READ64(RGX_CR_MMU_FAULT_STATUS, "MMU_FAULT_STATUS ");
+ DBG_READ64(RGX_CR_MMU_FAULT_STATUS_META, "MMU_FAULT_STATUS_META ");
+ }
+ else
+ {
+ DBG_READ32(RGX_CR_BIF_FAULT_BANK0_MMU_STATUS, "BIF_FAULT_BANK0_MMU_STATUS ");
+ DBG_READ64(RGX_CR_BIF_FAULT_BANK0_REQ_STATUS, "BIF_FAULT_BANK0_REQ_STATUS ");
+ DBG_READ32(RGX_CR_BIF_FAULT_BANK1_MMU_STATUS, "BIF_FAULT_BANK1_MMU_STATUS ");
+ DBG_READ64(RGX_CR_BIF_FAULT_BANK1_REQ_STATUS, "BIF_FAULT_BANK1_REQ_STATUS ");
+ }
+
+ DBG_READ32(RGX_CR_BIF_MMU_STATUS, "BIF_MMU_STATUS ");
+ DBG_READ32(RGX_CR_BIF_MMU_ENTRY, "BIF_MMU_ENTRY ");
+ DBG_READ64(RGX_CR_BIF_MMU_ENTRY_STATUS, "BIF_MMU_ENTRY_STATUS ");
+ if (bS7Infra)
+ {
+ DBG_READ32(RGX_CR_BIF_JONES_OUTSTANDING_READ, "BIF_JONES_OUTSTANDING_READ ");
+ DBG_READ32(RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ, "BIF_BLACKPEARL_OUTSTANDING_READ ");
+ DBG_READ32(RGX_CR_BIF_DUST_OUTSTANDING_READ, "BIF_DUST_OUTSTANDING_READ ");
+ }else
+ {
+
+ if (!bXTInfra)
+ {
+ DBG_READ32(RGX_CR_BIF_STATUS_MMU, "BIF_STATUS_MMU ");
+ DBG_READ32(RGX_CR_BIF_READS_EXT_STATUS, "BIF_READS_EXT_STATUS ");
+ DBG_READ32(RGX_CR_BIF_READS_INT_STATUS, "BIF_READS_INT_STATUS ");
+ }
+ DBG_READ32(RGX_CR_BIFPM_STATUS_MMU, "BIFPM_STATUS_MMU ");
+ DBG_READ32(RGX_CR_BIFPM_READS_EXT_STATUS, "BIFPM_READS_EXT_STATUS ");
+ DBG_READ32(RGX_CR_BIFPM_READS_INT_STATUS, "BIFPM_READS_INT_STATUS ");
+ }
+
+ if(e44871)
+ {
+ DBG_STRING("Warning: BRN44871 is present");
+ }
+
+ if(e47025)
+ {
+ DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS0, "CDM_CONTEXT_LOAD_PDS0 ");
+ DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS1, "CDM_CONTEXT_LOAD_PDS1 ");
+ }
+
+ DBG_READ32(RGX_CR_SLC_STATUS0, "SLC_STATUS0 ");
+ DBG_READ64(RGX_CR_SLC_STATUS1, "SLC_STATUS1 ");
+
+ if (ui32SLCBanks)
+ {
+ DBG_READ64(RGX_CR_SLC_STATUS2, "SLC_STATUS2 ");
+ }
+
+ if (bVIVTSlc)
+ {
+ DBG_READ64(RGX_CR_CONTEXT_MAPPING0, "CONTEXT_MAPPING0 ");
+ DBG_READ64(RGX_CR_CONTEXT_MAPPING1, "CONTEXT_MAPPING1 ");
+ DBG_READ64(RGX_CR_CONTEXT_MAPPING2, "CONTEXT_MAPPING2 ");
+ DBG_READ64(RGX_CR_CONTEXT_MAPPING3, "CONTEXT_MAPPING3 ");
+ DBG_READ64(RGX_CR_CONTEXT_MAPPING4, "CONTEXT_MAPPING4 ");
+ }else{
+ DBG_READ64(RGX_CR_BIF_CAT_BASE_INDEX, "BIF_CAT_BASE_INDEX ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE0, "BIF_CAT_BASE0 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE1, "BIF_CAT_BASE1 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE2, "BIF_CAT_BASE2 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE3, "BIF_CAT_BASE3 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE4, "BIF_CAT_BASE4 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE5, "BIF_CAT_BASE5 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE6, "BIF_CAT_BASE6 ");
+ DBG_READ64(RGX_CR_BIF_CAT_BASE7, "BIF_CAT_BASE7 ");
+ }
+
+ DBG_READ32(RGX_CR_BIF_CTRL_INVAL, "BIF_CTRL_INVAL ");
+ DBG_READ32(RGX_CR_BIF_CTRL, "BIF_CTRL ");
+
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_VCE0, "BIF_PM_CAT_BASE_VCE0 ");
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_TE0, "BIF_PM_CAT_BASE_TE0 ");
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_ALIST0, "BIF_PM_CAT_BASE_ALIST0 ");
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_VCE1, "BIF_PM_CAT_BASE_VCE1 ");
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_TE1, "BIF_PM_CAT_BASE_TE1 ");
+ DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_ALIST1, "BIF_PM_CAT_BASE_ALIST1 ");
+
+ DBG_READ32(RGX_CR_PERF_TA_PHASE, "PERF_TA_PHASE ");
+ DBG_READ32(RGX_CR_PERF_TA_CYCLE, "PERF_TA_CYCLE ");
+ DBG_READ32(RGX_CR_PERF_3D_PHASE, "PERF_3D_PHASE ");
+ DBG_READ32(RGX_CR_PERF_3D_CYCLE, "PERF_3D_CYCLE ");
+
+ DBG_READ32(RGX_CR_PERF_TA_OR_3D_CYCLE, "PERF_TA_OR_3D_CYCLE ");
+ DBG_CALC_TA_AND_3D(RGX_CR_PERF_TA_CYCLE, RGX_CR_PERF_3D_CYCLE, RGX_CR_PERF_TA_OR_3D_CYCLE,
+ "PERF_TA_AND_3D_CYCLE ");
+
+ DBG_READ32(RGX_CR_PERF_COMPUTE_PHASE, "PERF_COMPUTE_PHASE ");
+ DBG_READ32(RGX_CR_PERF_COMPUTE_CYCLE, "PERF_COMPUTE_CYCLE ");
+
+ DBG_READ32(RGX_CR_PM_PARTIAL_RENDER_ENABLE, "PARTIAL_RENDER_ENABLE ");
+
+ DBG_READ32(RGX_CR_ISP_RENDER, "ISP_RENDER ");
+ DBG_READ64(RGX_CR_TLA_STATUS, "TLA_STATUS ");
+ DBG_READ64(RGX_CR_MCU_FENCE, "MCU_FENCE ");
+
+ DBG_READ32(RGX_CR_VDM_CONTEXT_STORE_STATUS, "VDM_CONTEXT_STORE_STATUS ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK0, "VDM_CONTEXT_STORE_TASK0 ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK1, "VDM_CONTEXT_STORE_TASK1 ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK2, "VDM_CONTEXT_STORE_TASK2 ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK0, "VDM_CONTEXT_RESUME_TASK0 ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK1, "VDM_CONTEXT_RESUME_TASK1 ");
+ DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK2, "VDM_CONTEXT_RESUME_TASK2 ");
+
+ DBG_READ32(RGX_CR_ISP_CTL, "ISP_CTL ");
+ DBG_READ32(RGX_CR_ISP_STATUS, "ISP_STATUS ");
+ DBG_READ32(RGX_CR_MTS_INTCTX, "MTS_INTCTX ");
+ DBG_READ32(RGX_CR_MTS_BGCTX, "MTS_BGCTX ");
+ DBG_READ32(RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE, "MTS_BGCTX_COUNTED_SCHEDULE ");
+ DBG_READ32(RGX_CR_MTS_SCHEDULE, "MTS_SCHEDULE ");
+ DBG_READ32(RGX_CR_MTS_GPU_INT_STATUS, "MTS_GPU_INT_STATUS ");
+
+ DBG_READ32(RGX_CR_CDM_CONTEXT_STORE_STATUS, "CDM_CONTEXT_STORE_STATUS ");
+ DBG_READ64(RGX_CR_CDM_CONTEXT_PDS0, "CDM_CONTEXT_PDS0 ");
+ DBG_READ64(RGX_CR_CDM_CONTEXT_PDS1, "CDM_CONTEXT_PDS1 ");
+ DBG_READ64(RGX_CR_CDM_TERMINATE_PDS, "CDM_TERMINATE_PDS ");
+ DBG_READ64(RGX_CR_CDM_TERMINATE_PDS1, "CDM_TERMINATE_PDS1 ");
+
+ if(e47025)
+ {
+ DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS0, "CDM_CONTEXT_LOAD_PDS0 ");
+ DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS1, "CDM_CONTEXT_LOAD_PDS1 ");
+ }
+
+ if(bRayTracing)
+ {
+#if defined(RGX_FEATURE_RAY_TRACING) || defined(SUPPORT_KERNEL_SRVINIT)
+ DBG_READ32(DPX_CR_BIF_MMU_STATUS, "DPX_CR_BIF_MMU_STATUS ");
+ DBG_READ64(DPX_CR_BIF_FAULT_BANK_MMU_STATUS, "DPX_CR_BIF_FAULT_BANK_MMU_STATUS");
+ DBG_READ64(DPX_CR_BIF_FAULT_BANK_REQ_STATUS, "DPX_CR_BIF_FAULT_BANK_REQ_STATUS");
+
+ DBG_READ64(RGX_CR_RPM_SHF_FPL, "RGX_CR_RPM_SHF_FPL ");
+ DBG_READ32(RGX_CR_RPM_SHF_FPL_READ, "RGX_CR_RPM_SHF_FPL_READ ");
+ DBG_READ32(RGX_CR_RPM_SHF_FPL_WRITE, "RGX_CR_RPM_SHF_FPL_WRITE ");
+ DBG_READ64(RGX_CR_RPM_SHG_FPL, "RGX_CR_RPM_SHG_FPL ");
+ DBG_READ32(RGX_CR_RPM_SHG_FPL_READ, "RGX_CR_RPM_SHG_FPL_READ ");
+ DBG_READ32(RGX_CR_RPM_SHG_FPL_WRITE, "RGX_CR_RPM_SHG_FPL_WRITE ");
+#endif
+ }
+
+ if (bS7Infra)
+ {
+ DBG_READ32(RGX_CR_JONES_IDLE, "JONES_IDLE ");
+ }
+
+ DBG_READ32(RGX_CR_SIDEKICK_IDLE, "SIDEKICK_IDLE ");
+ if (!bS7Infra)
+ {
+ DBG_READ32(RGX_CR_SLC_IDLE, "SLC_IDLE ");
+ }else
+ {
+ DBG_READ32(RGX_CR_SLC3_IDLE, "SLC3_IDLE ");
+ DBG_READ64(RGX_CR_SLC3_STATUS, "SLC3_STATUS ");
+ DBG_READ32(RGX_CR_SLC3_FAULT_STOP_STATUS, "SLC3_FAULT_STOP_STATUS ");
+ }
+
+ if (ui32Meta)
+ {
+ DBG_MSP_READ(META_CR_T0ENABLE_OFFSET, "T0 TXENABLE ");
+ DBG_MSP_READ(META_CR_T0STATUS_OFFSET, "T0 TXSTATUS ");
+ DBG_MSP_READ(META_CR_T0DEFR_OFFSET, "T0 TXDEFR ");
+ DBG_MCR_READ(META_CR_THR0_PC, "T0 PC ");
+ DBG_MCR_READ(META_CR_THR0_PCX, "T0 PCX ");
+ DBG_MCR_READ(META_CR_THR0_SP, "T0 SP ");
+ }
+
+ if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+ {
+ DBG_MSP_READ(META_CR_T1ENABLE_OFFSET, "T1 TXENABLE ");
+ DBG_MSP_READ(META_CR_T1STATUS_OFFSET, "T1 TXSTATUS ");
+ DBG_MSP_READ(META_CR_T1DEFR_OFFSET, "T1 TXDEFR ");
+ DBG_MCR_READ(META_CR_THR1_PC, "T1 PC ");
+ DBG_MCR_READ(META_CR_THR1_PCX, "T1 PCX ");
+ DBG_MCR_READ(META_CR_THR1_SP, "T1 SP ");
+ }
+
+ if (bFirmwarePerf)
+ {
+ DBG_MSP_READ(META_CR_PERF_COUNT0, "PERF_COUNT0 ");
+ DBG_MSP_READ(META_CR_PERF_COUNT1, "PERF_COUNT1 ");
+ }
+
+ if (bMIPS)
+ {
+ DBG_READ32(RGX_CR_MIPS_EXCEPTION_STATUS, "MIPS_EXCEPTION_STATUS ");
+ }
+
+ return IMG_TRUE;
+}
+
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+ @Function RGXTDProcessFWImage
+
+ @Description Fetch and send data used by the trusted device to complete
+ the FW image setup
+
+ @Input psDeviceNode - Device node
+ @Input psRGXFW - Firmware blob
+
+ @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct RGXFW *psRGXFW)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_TD_FW_PARAMS sTDFWParams;
+ PVRSRV_ERROR eError;
+
+ if (psDevConfig->pfnTDSendFWImage == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXTDProcessFWImage: TDProcessFWImage not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ sTDFWParams.pvFirmware = RGXFirmwareData(psRGXFW);
+ sTDFWParams.ui32FirmwareSize = RGXFirmwareSize(psRGXFW);
+ sTDFWParams.sFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+ sTDFWParams.sFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+ sTDFWParams.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+ sTDFWParams.sFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+ eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+ return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function AcquireHostData
+
+ @Description Acquire Device MemDesc and CPU pointer for a given PMR
+
+ @Input hServices : Services connection
+ @Input hPMR : PMR
+ @Output ppsHostMemDesc : Returned MemDesc
+ @Output ppvHostAddr : Returned CPU pointer
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static INLINE
+PVRSRV_ERROR AcquireHostData(SHARED_DEV_CONNECTION hServices,
+ IMG_HANDLE hPMR,
+ DEVMEM_MEMDESC **ppsHostMemDesc,
+ void **ppvHostAddr)
+{
+ IMG_HANDLE hImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemMakeLocalImportHandle(hServices,
+ hPMR,
+ &hImportHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemMakeLocalImportHandle failed (%d)", eError));
+ goto acquire_failmakehandle;
+ }
+
+ eError = DevmemLocalImport(hServices,
+ hImportHandle,
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+ ppsHostMemDesc,
+ &uiImportSize,
+ "AcquireHostData");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemLocalImport failed (%d)", eError));
+ goto acquire_failimport;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsHostMemDesc,
+ ppvHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemAcquireCpuVirtAddr failed (%d)", eError));
+ goto acquire_failcpuaddr;
+ }
+
+ /* We don't need the import handle anymore */
+ DevmemUnmakeLocalImportHandle(hServices, hImportHandle);
+
+ return PVRSRV_OK;
+
+
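+/* Error unwind: each label below releases only what was successfully acquired
+ * before the corresponding failure point. */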
+acquire_failcpuaddr:
+ DevmemFree(*ppsHostMemDesc);
+
+acquire_failimport:
+ DevmemUnmakeLocalImportHandle(hServices, hImportHandle);
+
+acquire_failmakehandle:
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function ReleaseHostData
+
+ @Description Releases resources associated with a Device MemDesc
+
+ @Input psHostMemDesc : MemDesc to free
+
+ @Return      void
+
+******************************************************************************/
+static INLINE void ReleaseHostData(DEVMEM_MEMDESC *psHostMemDesc)
+{
+ DevmemReleaseCpuVirtAddr(psHostMemDesc);
+ DevmemFree(psHostMemDesc);
+}
+
+/*!
+*******************************************************************************
+
+ @Function GetFirmwareBVNC
+
+ @Description Retrieves FW BVNC information from binary data
+
+ @Input psRGXFW : Firmware binary handle to get BVNC from
+
+ @Output      psFWBVNC : structure to store the BVNC info
+
+ @Return IMG_TRUE upon success, IMG_FALSE otherwise
+
+******************************************************************************/
+static INLINE IMG_BOOL GetFirmwareBVNC(struct RGXFW *psRGXFW,
+ RGXFWIF_COMPCHECKS_BVNC *psFWBVNC)
+{
+#if defined(LINUX)
+ const size_t FWSize = RGXFirmwareSize(psRGXFW);
+ const RGXFWIF_COMPCHECKS_BVNC * psBinBVNC;
+#endif
+
+#if !defined(LINUX)
+	/* Check not available on non-Linux OSes. Just fill in the struct and return true */
+ psFWBVNC->ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION;
+ psFWBVNC->ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX;
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ rgx_bvnc_packed(&psFWBVNC->ui64BNC, psFWBVNC->aszV, psFWBVNC->ui32VLenMax,
+ RGX_BNC_B, RGX_BVNC_V_ST, RGX_BNC_N, RGX_BNC_C);
+#else
+ rgx_bvnc_packed(&psFWBVNC->ui64BNC, psFWBVNC->aszV, psFWBVNC->ui32VLenMax,
+ RGX_BNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BNC_KM_N, RGX_BNC_KM_C);
+#endif /* SUPPORT_KERNEL_SRVINIT */
+
+#else
+
+ if (FWSize < FW_BVNC_BACKWARDS_OFFSET)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+ __func__, FWSize));
+ return IMG_FALSE;
+ }
+
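+	/* The BVNC block is stored big-endian at a fixed offset from the end of
+	 * the firmware image; each field is converted to host byte order below. */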
+ psBinBVNC = (RGXFWIF_COMPCHECKS_BVNC *) ((IMG_UINT8 *) (RGXFirmwareData(psRGXFW)) +
+ (FWSize - FW_BVNC_BACKWARDS_OFFSET));
+
+ psFWBVNC->ui32LayoutVersion = RGX_INT32_FROM_BE(psBinBVNC->ui32LayoutVersion);
+
+ psFWBVNC->ui32VLenMax = RGX_INT32_FROM_BE(psBinBVNC->ui32VLenMax);
+
+ psFWBVNC->ui64BNC = RGX_INT64_FROM_BE(psBinBVNC->ui64BNC);
+
+ strncpy(psFWBVNC->aszV, psBinBVNC->aszV, sizeof(psFWBVNC->aszV));
+#endif /* defined(LINUX) */
+
+ return IMG_TRUE;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitFirmwareBridgeWrapper
+
+ @Description Calls the proper RGXInitFirmware bridge version
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static INLINE PVRSRV_ERROR RGXInitFirmwareBridgeWrapper(SHARED_DEV_CONNECTION hServices,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32FWConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ RGXFWIF_COMPCHECKS_BVNC *psFirmwareBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ IMG_HANDLE *phHWPerfDataPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf)
+{
+ PVRSRV_ERROR eError;
+
+ RGX_FW_INIT_IN_PARAMS sInParams = {
+ RGXFWINITPARAMS_VERSION,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32HWPerfFWBufSizeKB,
+ ui64HWPerfFilter,
+ ui32FWConfigFlags,
+ ui32LogType,
+ ui32FilterFlags,
+ ui32JonesDisableMask,
+ ui32HWRDebugDumpLimit,
+ { 0 },
+ { 0 },
+ ui32HWPerfCountersDataSize,
+ eRGXRDPowerIslandConf,
+ eFirmwarePerf,
+ { 0 }
+ };
+
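+	/* The BVNC structures are zero-initialised in the aggregate above and
+	 * copied into the input parameters here. */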
+ memcpy(&(sInParams.sClientBVNC), psClientBVNC, sizeof (sInParams.sClientBVNC));
+ memcpy(&(sInParams.sFirmwareBVNC), psFirmwareBVNC, sizeof (sInParams.sFirmwareBVNC));
+
+
+ eError = BridgeRGXInitFirmwareExtended(hServices, ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks, psRGXFwInit, phHWPerfDataPMR, &sInParams);
+
+ /* Error calling the bridge could be due to old KM not implementing the extended version */
+ if ((eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ || (eError == PVRSRV_ERROR_BRIDGE_EINVAL))
+ {
+ eError = BridgeRGXInitFirmware(hServices,
+ psRGXFwInit,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32HWPerfFWBufSizeKB,
+ ui64HWPerfFilter,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ ui32FWConfigFlags,
+ ui32LogType,
+ ui32FilterFlags,
+ ui32JonesDisableMask,
+ ui32HWRDebugDumpLimit,
+ psClientBVNC,
+ ui32HWPerfCountersDataSize,
+ phHWPerfDataPMR,
+ eRGXRDPowerIslandConf,
+ eFirmwarePerf);
+ }
+
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function InitFirmware
+
+ @Description Allocate, initialise and pdump Firmware code and data memory
+
+ @Input hServices : Services connection
+ @Input psHints : Apphints
+ @Input psBVNC : Compatibility checks
+ @Output phFWCodePMR : FW code PMR handle
+ @Output phFWDataPMR : FW data PMR handle
+ @Output phFWCorememPMR : FW coremem code PMR handle
+ @Output phHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(SHARED_DEV_CONNECTION hServices,
+ RGX_SRVINIT_APPHINTS *psHints,
+ RGXFWIF_COMPCHECKS_BVNC *psBVNC,
+ IMG_HANDLE *phFWCodePMR,
+ IMG_HANDLE *phFWDataPMR,
+ IMG_HANDLE *phFWCorememPMR,
+ IMG_HANDLE *phHWPerfDataPMR)
+{
+ struct RGXFW *psRGXFW = NULL;
+ const IMG_BYTE *pbRGXFirmware = NULL;
+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC;
+
+ /* FW code memory */
+ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ DEVMEM_MEMDESC *psFWCodeHostMemDesc;
+ void *pvFWCodeHostAddr;
+
+ /* FW data memory */
+ IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ DEVMEM_MEMDESC *psFWDataHostMemDesc;
+ void *pvFWDataHostAddr;
+
+ /* FW coremem code memory */
+ IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+ IMG_DEV_VIRTADDR sFWCorememDevVAddrBase;
+
+ /*
+ * Only declare psFWCorememHostMemDesc where used (PVR_UNREFERENCED_PARAMETER doesn't
+ * help for local vars when using certain compilers)
+ */
+ DEVMEM_MEMDESC *psFWCorememHostMemDesc;
+ void *pvFWCorememHostAddr = NULL;
+
+ RGXFWIF_DEV_VIRTADDR sFWCorememFWAddr; /* FW coremem data */
+ RGXFWIF_DEV_VIRTADDR sRGXFwInit; /* FW init struct */
+ RGX_INIT_LAYER_PARAMS sInitParams;
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ IMG_UINT32 aui32RGXFWAlignChecks[] = {RGXFW_ALIGN_CHECKS_INIT};
+#endif
+ IMG_UINT32 ui32FWConfigFlags;
+ PVRSRV_ERROR eError;
+ IMG_CHAR *pszFWFilename = NULL;
+ IMG_CHAR *pszFWpFilename = NULL;
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ IMG_CHAR aszFWFilenameStr[OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+2];
+ IMG_CHAR aszFWpFilenameStr[OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+3];
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hServices;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
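+	/* Build per-BVNC firmware filenames of the form <name>.B.V.N.C, plus a
+	 * variant with a 'p' suffix on the V field. */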
+ pszFWFilename = &aszFWFilenameStr[0];
+ OSSNPrintf(pszFWFilename, OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+2, "%s.%d.%d.%d.%d", RGX_FW_FILENAME,
+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+ pszFWpFilename = &aszFWpFilenameStr[0];
+ OSSNPrintf(pszFWpFilename, OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+3, "%s.%d.%dp.%d.%d", RGX_FW_FILENAME,
+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) */
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ /*
+ * Get pointer to Firmware image
+ */
+
+ psRGXFW = RGXLoadFirmware(hServices, pszFWFilename, pszFWpFilename);
+ if (psRGXFW == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXLoadFirmware failed"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ goto cleanup_initfw;
+ }
+ pbRGXFirmware = RGXFirmwareData(psRGXFW);
+
+ if (!GetFirmwareBVNC(psRGXFW, &sFWBVNC))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXLoadFirmware failed to get Firmware BVNC"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ goto cleanup_initfw;
+ }
+
+ sInitParams.hServices = hServices;
+
+ /*
+ * Allocate Firmware memory
+ */
+
+ eError = RGXGetFWImageAllocSize(&sInitParams,
+ &uiFWCodeAllocSize,
+ &uiFWDataAllocSize,
+ &uiFWCorememCodeAllocSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXGetFWImageAllocSize failed"));
+ goto cleanup_initfw;
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Disable META core memory allocation unless the META DMA is available */
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if (!RGXDeviceHasFeatureInit(&sInitParams, RGX_FEATURE_META_DMA_BIT_MASK))
+ {
+ uiFWCorememCodeAllocSize = 0;
+ }
+#elif !defined(RGX_FEATURE_META_DMA)
+ uiFWCorememCodeAllocSize = 0;
+#endif
+#endif
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFWFilename);
+ PVR_UNREFERENCED_PARAMETER(pszFWpFilename);
+ PVR_UNREFERENCED_PARAMETER(sInitParams);
+ PVR_UNREFERENCED_PARAMETER(pbRGXFirmware);
+ uiFWCodeAllocSize = 0;
+ uiFWDataAllocSize = 0;
+ uiFWCorememCodeAllocSize = 0;
+#endif
+
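+	/* Allocate the FW code, data and coremem code regions via the bridge; the
+	 * returned PMR handles and device virtual base addresses are used below. */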
+ eError = BridgeRGXInitAllocFWImgMem(hServices,
+ uiFWCodeAllocSize,
+ uiFWDataAllocSize,
+ uiFWCorememCodeAllocSize,
+ phFWCodePMR,
+ &sFWCodeDevVAddrBase,
+ phFWDataPMR,
+ &sFWDataDevVAddrBase,
+ phFWCorememPMR,
+ &sFWCorememDevVAddrBase,
+ &sFWCorememFWAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitAllocFWImgMem failed (%d)", eError));
+ goto cleanup_initfw;
+ }
+
+
+ /*
+ * Setup Firmware initialisation data
+ */
+
+ GetFWConfigFlags(psHints, &ui32FWConfigFlags);
+
+ eError = RGXInitFirmwareBridgeWrapper(hServices,
+ &sRGXFwInit,
+ psHints->bEnableSignatureChecks,
+ psHints->ui32SignatureChecksBufSize,
+ psHints->ui32HWPerfFWBufSize,
+ (IMG_UINT64)psHints->ui32HWPerfFilter0 |
+ ((IMG_UINT64)psHints->ui32HWPerfFilter1 << 32),
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ 0,
+ NULL,
+#else
+ IMG_ARR_NUM_ELEMS(aui32RGXFWAlignChecks),
+ aui32RGXFWAlignChecks,
+#endif
+ ui32FWConfigFlags,
+ psHints->ui32LogType,
+ GetFilterFlags(psHints),
+ psHints->ui32JonesDisableMask,
+ psHints->ui32HWRDebugDumpLimit,
+ psBVNC,
+ &sFWBVNC,
+ sizeof(RGXFWIF_HWPERF_CTL),
+ phHWPerfDataPMR,
+ psHints->eRGXRDPowerIslandConf,
+ psHints->eFirmwarePerf);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitFirmware failed (%d)", eError));
+ goto cleanup_initfw;
+ }
+#if defined(PVRSRV_GPUVIRT_GUESTDRV)
+ PVR_UNREFERENCED_PARAMETER(pvFWCorememHostAddr);
+ PVR_UNREFERENCED_PARAMETER(psFWCorememHostMemDesc);
+ PVR_UNREFERENCED_PARAMETER(pvFWDataHostAddr);
+ PVR_UNREFERENCED_PARAMETER(psFWDataHostMemDesc);
+ PVR_UNREFERENCED_PARAMETER(pvFWCodeHostAddr);
+ PVR_UNREFERENCED_PARAMETER(psFWCodeHostMemDesc);
+#else
+ /*
+ * Acquire pointers to Firmware allocations
+ */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ eError = AcquireHostData(hServices,
+ *phFWCodePMR,
+ &psFWCodeHostMemDesc,
+ &pvFWCodeHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW code failed (%d)", eError));
+ goto release_code;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psFWCodeHostMemDesc);
+
+ /* We can't get a pointer to a secure FW allocation from within the DDK */
+ pvFWCodeHostAddr = NULL;
+#endif
+
+ eError = AcquireHostData(hServices,
+ *phFWDataPMR,
+ &psFWDataHostMemDesc,
+ &pvFWDataHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW data failed (%d)", eError));
+ goto release_data;
+ }
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ if (uiFWCorememCodeAllocSize)
+ {
+ eError = AcquireHostData(hServices,
+ *phFWCorememPMR,
+ &psFWCorememHostMemDesc,
+ &pvFWCorememHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW coremem code failed (%d)", eError));
+ goto release_corememcode;
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psFWCorememHostMemDesc);
+
+ /* We can't get a pointer to a secure FW allocation from within the DDK */
+ pvFWCorememHostAddr = NULL;
+#endif
+
+
+ /*
+ * Process the Firmware image and setup code and data segments.
+ *
+ * When the trusted device is enabled and the FW code lives
+ * in secure memory we will only setup the data segments here,
+ * while the code segments will be loaded to secure memory
+ * by the trusted device.
+ */
+
+ eError = RGXProcessFWImage(&sInitParams,
+ pbRGXFirmware,
+ pvFWCodeHostAddr,
+ pvFWDataHostAddr,
+ pvFWCorememHostAddr,
+ &sFWCodeDevVAddrBase,
+ &sFWDataDevVAddrBase,
+ &sFWCorememDevVAddrBase,
+ &sFWCorememFWAddr,
+ &sRGXFwInit,
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ 2,
+#else
+ psHints->eUseMETAT1 == RGX_META_T1_OFF ? 1 : 2,
+#endif
+ psHints->eUseMETAT1 == RGX_META_T1_MAIN ? 1 : 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXProcessFWImage failed (%d)", eError));
+ goto release_fw_allocations;
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ RGXTDProcessFWImage(hServices, psRGXFW);
+#endif
+
+
+ /*
+ * Perform final steps (if any) on the kernel
+ * before pdumping the Firmware allocations
+ */
+ eError = BridgeRGXInitFinaliseFWImage(hServices);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXInitFinaliseFWImage failed (%d)", eError));
+ goto release_fw_allocations;
+ }
+
+ /*
+ * PDump Firmware allocations
+ */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ SRVINITPDumpComment(hServices, "Dump firmware code image");
+ DevmemPDumpLoadMem(psFWCodeHostMemDesc,
+ 0,
+ uiFWCodeAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ SRVINITPDumpComment(hServices, "Dump firmware data image");
+ DevmemPDumpLoadMem(psFWDataHostMemDesc,
+ 0,
+ uiFWDataAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ if (uiFWCorememCodeAllocSize)
+ {
+ SRVINITPDumpComment(hServices, "Dump firmware coremem image");
+ DevmemPDumpLoadMem(psFWCorememHostMemDesc,
+ 0,
+ uiFWCorememCodeAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
+
+ /*
+ * Release Firmware allocations and clean up
+ */
+
+release_fw_allocations:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_corememcode:
+ if (uiFWCorememCodeAllocSize)
+ {
+ ReleaseHostData(psFWCorememHostMemDesc);
+ }
+#endif
+
+release_data:
+ ReleaseHostData(psFWDataHostMemDesc);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_code:
+ ReleaseHostData(psFWCodeHostMemDesc);
+#endif
+#endif /* PVRSRV_GPUVIRT_GUESTDRV */
+cleanup_initfw:
+ if (psRGXFW != NULL)
+ {
+ RGXUnloadFirmware(psRGXFW);
+ }
+
+ return eError;
+}
+
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function InitialiseHWPerfCounters
+
+ @Description
+
+ Initialises the hardware performance counters and dumps them out to pdump so that they can be modified at a later point.
+
+ @Input hServices
+
+ @Input psHWPerfDataMemDesc
+
+ @Input psHWPerfInitDataInt
+
+ @Return void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(SHARED_DEV_CONNECTION hServices, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+ RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+ IMG_UINT32 ui32CntBlkModelLen;
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+ IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx ;
+	void *pvDev = NULL; /* TODO: use SHARED_DEV_CONNECTION here? */
+ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hServices;
+ pvDev = psDeviceNode->pvDevice;
+ }
+#endif
+ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+ for(ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+ {
+		/* Skip this counter block if the core does not implement it
+		 * due to core type/BVNC features. */
+ psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+ if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDev, &sCntBlkRtInfo) == IMG_FALSE)
+ {
+ continue;
+ }
+
+		/* Program all counters in the block so that those already on may
+		 * be configured off and vice versa. */
+ for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase;
+ ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
+ ui32BlockID++)
+ {
+
+ SRVINITPDumpComment(hServices, "Unit %d Block : %s", ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase, psBlkTypeDesc->pszBlockNameComment);
+			/* Get the per-block configuration store to update from the global
+			 * block configuration. This remembers the configuration across
+			 * reconfigurations and core power-on events when APM is enabled. */
+ psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+ /* Assert to check for HWPerf block mis-configuration */
+ PVR_ASSERT(psHWPerfInitBlkData);
+
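+			/* The pdump offset of each field is its byte offset within the
+			 * HWPerf control block, computed from the CPU mapping addresses. */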
+ psHWPerfInitBlkData->bValid = IMG_TRUE;
+ SRVINITPDumpComment(hServices, "bValid: This specifies if the layout block is valid for the given BVNC.");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->bValid,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+ SRVINITPDumpComment(hServices, "bEnabled: Set to 0x1 if the block needs to be enabled during playback. ");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->bEnabled,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->eBlockID = ui32BlockID;
+ SRVINITPDumpComment(hServices, "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->eBlockID,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->uiCounterMask = 0x00;
+ SRVINITPDumpComment(hServices, "uiCounterMask: Bitmask for selecting the counters that need to be configured.(Bit 0 - counter0, bit 1 - counter1 and so on. ");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->uiCounterMask,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ for(ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->uiNumCounters; ui32CounterIdx++)
+ {
+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);
+
+ SRVINITPDumpComment(hServices, "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment,ui32CounterIdx);
+ DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
+ PDUMP_FLAGS_CONTINUOUS);
+
+ }
+ }
+ }
+}
+/*!
+*******************************************************************************
+
+ @Function InitialiseCustomCounters
+
+ @Description
+
+ Initialises the custom counters and dumps them out to pdump so that they can be modified at a later point.
+
+ @Input hServices
+
+ @Input psHWPerfDataMemDesc
+
+ @Return void
+
+******************************************************************************/
+
+static void InitialiseCustomCounters(SHARED_DEV_CONNECTION hServices, DEVMEM_MEMDESC *psHWPerfDataMemDesc)
+{
+ IMG_UINT32 ui32CustomBlock, ui32CounterID;
+
+ SRVINITPDumpComment(hServices, "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
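+	/* For each custom counter block, pdump a zeroed selected-counter count and
+	 * zeroed counter IDs so that they can be patched at playback time. */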
+ for( ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++ )
+ {
+ /*
+		 * Some compilers cannot cope with using offsetof() here: the expression contains a
+		 * non-const variable, but they require a constant value (typical error: "expression
+		 * must have a constant value"), so the offset is computed manually instead.
+ */
+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);
+
+ SRVINITPDumpComment(hServices, "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ uiOffsetOfCustomBlockSelectedCounters,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ for(ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
+ {
+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);
+
+ SRVINITPDumpComment(hServices, "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ uiOffsetOfCustomBlockSelectedCounterIDs,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function InitialiseAllCounters
+
+ @Description Initialise HWPerf and custom counters
+
+ @Input hServices : Services connection
+ @Input hHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(SHARED_DEV_CONNECTION hServices,
+ IMG_HANDLE hHWPerfDataPMR)
+{
+ RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+ DEVMEM_MEMDESC *psHWPerfDataMemDesc;
+ PVRSRV_ERROR eError;
+
+ eError = AcquireHostData(hServices,
+ hHWPerfDataPMR,
+ &psHWPerfDataMemDesc,
+ (void **)&psHWPerfInitData);
+
+
+	if (eError != PVRSRV_OK)
+	{
+		/* Nothing has been acquired yet, so there is nothing to release on this path */
+		PVR_LOGG_IF_ERROR(eError, "AcquireHostData", failHWPerfCountersMemDescAqCpuVirt);
+	}
+
+	InitialiseHWPerfCounters(hServices, psHWPerfDataMemDesc, psHWPerfInitData);
+	InitialiseCustomCounters(hServices, psHWPerfDataMemDesc);
+
+	ReleaseHostData(psHWPerfDataMemDesc);
+
+failHWPerfCountersMemDescAqCpuVirt:
+
+ return eError;
+}
+#endif /* PDUMP */
+
+static void
+_ParseHTBAppHints(SHARED_DEV_CONNECTION hServices)
+{
+ PVRSRV_ERROR eError;
+ void * pvParamState = NULL;
+ IMG_UINT32 ui32LogType;
+ IMG_BOOL bAnyLogGroupConfigured;
+
+ IMG_CHAR * szBufferName = "PVRHTBuffer";
+ IMG_UINT32 ui32BufferSize;
+ HTB_OPMODE_CTRL eOpMode;
+
+ /* Services initialisation parameters */
+ pvParamState = SrvInitParamOpen();
+
+ SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType);
+ bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE: IMG_FALSE;
+ SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, eOpMode);
+ SrvInitParamGetUINT32(pvParamState, HTBufferSize, ui32BufferSize);
+
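+	/* Create the host trace buffer with the requested size, then enable the
+	 * configured log groups (if any) in the requested operation mode. */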
+ eError = HTBConfigure(hServices, szBufferName, ui32BufferSize);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVHTBConfigure", cleanup);
+
+ if (bAnyLogGroupConfigured)
+ {
+ eError = HTBControl(hServices, 1, &ui32LogType, 0, 0, HTB_LOGMODE_ALLPID, eOpMode);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVHTBControl", cleanup);
+ }
+
+cleanup:
+ SrvInitParamClose(pvParamState);
+}
+
+#if defined(PDUMP) && defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__)
+static void RGXInitFWSigRegisters(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32PhantomCnt = 0;
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+ {
+ ui32PhantomCnt = RGX_GET_NUM_PHANTOMS(psDevInfo->sDevFeatureCfg.ui32NumClusters) - 1;
+ }
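+
+	/* ui32PhantomCnt is used below as the last indirect register index
+	 * (number of phantoms minus one) for BLACKPEARL-indirected entries. */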
+
+ /*Initialise the TA related signature registers */
+ if(0 == gui32TASigRegCount)
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK)
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVB_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS0_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS1_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS2_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS3_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS4_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS5_CHECKSUM, 0, 0, 0};
+ }
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK)
+ {
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK)
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP_CLIP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP, 0, 0, 0};
+ }
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TE_CHECKSUM,0, 0, 0};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP_SIGNATURE, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TE_SIGNATURE, 0, 0, 0};
+ }
+
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_VCE_CHECKSUM, 0, 0, 0};
+
+ if(0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PDS_PER_DUST_BIT_MASK))
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PDS_DOUTM_STM_SIGNATURE,0, 0, 0};
+ }
+ }
+
+ if(0 == gui323DSigRegCount)
+ {
+ /* List of 3D signature and checksum register addresses */
+ if(0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM, 0, 0, 0};
+ }else
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM, RGX_CR_PBE_INDIRECT, 0, psDevInfo->sDevFeatureCfg.ui32NumClusters-1};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+		}
+
+ }
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXInit
+
+ @Description
+
+ RGX Initialisation
+
+ @Input hServices
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXInit(SHARED_DEV_CONNECTION hServices)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+
+ /* Services initialisation parameters */
+ RGX_SRVINIT_APPHINTS sApphints = {0};
+ IMG_UINT32 ui32DeviceFlags;
+ IMG_UINT64 ui64ErnsBrns = 0, ui64Features = 0;
+
+ /* Server scripts */
+ RGX_SCRIPT_BUILD sDbgInitScript = {RGX_MAX_DEBUG_COMMANDS, 0, IMG_FALSE, asDbgCommands};
+
+ /* FW allocations handles */
+ IMG_HANDLE hFWCodePMR;
+ IMG_HANDLE hFWDataPMR;
+ IMG_HANDLE hFWCorememPMR;
+
+ /* HWPerf Ctl allocation handle */
+ IMG_HANDLE hHWPerfDataPMR;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hServices;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ IMG_CHAR sV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX];
+
+ OSSNPrintf(sV, sizeof(sV), "%d", psDevInfo->sDevFeatureCfg.ui32V);
+ /*
+ * FIXME:
+ * Is this check redundant for the kernel mode version of srvinit?
+ * How do we check the user mode BVNC in this case?
+ */
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, psDevInfo->sDevFeatureCfg.ui32B, \
+ sV, \
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+
+ ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+ ui64Features = psDevInfo->sDevFeatureCfg.ui64Features;
+#else
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, RGX_BVNC_B, RGX_BVNC_V_ST, RGX_BVNC_N, RGX_BVNC_C);
+#endif
+
+	/* Read and process the AppHints */
+ _ParseHTBAppHints(hServices);
+ GetApphints(&sApphints, ui64ErnsBrns, ui64Features);
+ GetDeviceFlags(&sApphints, &ui32DeviceFlags);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT uiOS, uiRegion;
+ IMG_UINT32 aui32Buffer[GPUVIRT_VALIDATION_NUM_OS * GPUVIRT_VALIDATION_NUM_REGIONS * 2]; /* The final 2 is 1 for Min and 1 for Max */
+ IMG_UINT32 ui32Counter = 0;
+
+ for (uiOS = 0; uiOS < GPUVIRT_VALIDATION_NUM_OS; uiOS++)
+ {
+ for (uiRegion = 0; uiRegion < GPUVIRT_VALIDATION_NUM_REGIONS; uiRegion++)
+ {
+ aui32Buffer[ui32Counter++] = sApphints.aui32OSidMin[uiOS][uiRegion];
+ aui32Buffer[ui32Counter++] = sApphints.aui32OSidMax[uiOS][uiRegion];
+ }
+ }
+
+ BridgeGPUVIRTPopulateLMASubArenas(hServices, ui32Counter, aui32Buffer, sApphints.bEnableTrustedDeviceAceConfig);
+}
+#endif
+
+
+ eError = InitFirmware(hServices,
+ &sApphints,
+ &sBVNC,
+ &hFWCodePMR,
+ &hFWDataPMR,
+ &hFWCorememPMR,
+ &hHWPerfDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitFirmware failed (%d)", eError));
+ goto cleanup;
+ }
+
+ /*
+ * Build Debug info script
+ */
+ sDbgInitScript.psCommands = asDbgCommands;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if(!PrepareDebugScript(&sDbgInitScript, sApphints.eFirmwarePerf != FW_PERF_CONF_NONE, psDevInfo))
+#else
+ if(!PrepareDebugScript(&sDbgInitScript, sApphints.eFirmwarePerf != FW_PERF_CONF_NONE, NULL))
+#endif
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: Run out of mem for the dbg commands"));
+ }
+
+ /* finish the script */
+ if(!ScriptHalt(&sDbgInitScript))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: Run out of mem for the terminating dbg script"));
+ }
+
+#if defined(PDUMP)
+ eError = InitialiseAllCounters(hServices, hHWPerfDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitialiseAllCounters failed (%d)", eError));
+ goto cleanup;
+ }
+#endif
+
+ /*
+ * Perform second stage of RGX initialisation
+ */
+ eError = BridgeRGXInitDevPart2(hServices,
+ sDbgInitScript.psCommands,
+ ui32DeviceFlags,
+ sApphints.ui32HWPerfHostBufSize,
+ sApphints.ui32HWPerfHostFilter,
+ sApphints.eRGXActivePMConf,
+ hFWCodePMR,
+ hFWDataPMR,
+ hFWCorememPMR,
+ hHWPerfDataPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: BridgeRGXInitDevPart2 failed (%d)", eError));
+ goto cleanup;
+ }
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(SUPPORT_VALIDATION)
+ PVRSRVAppHintDumpState();
+#endif
+
+#if defined(PDUMP)
+ /*
+ * Dump the list of signature registers
+ */
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32TASigRegCount = 0, ui323DSigRegCount= 0;
+ IMG_BOOL bRayTracing = IMG_FALSE;
+
+#if defined(SUPPORT_KERNEL_SRVINIT) && defined(__KERNEL__)
+ RGXInitFWSigRegisters(psDevInfo);
+ ui32TASigRegCount = gui32TASigRegCount;
+ ui323DSigRegCount = gui323DSigRegCount;
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+ {
+ bRayTracing = IMG_TRUE;
+ }
+#if defined(DEBUG)
+ if (gui32TASigRegCount > SIG_REG_TA_MAX_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: TA signature registers max count exceeded",__func__));
+ PVR_ASSERT(0);
+ }
+ if (gui323DSigRegCount > SIG_REG_3D_MAX_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: 3D signature registers max count exceeded",__func__));
+ PVR_ASSERT(0);
+ }
+#endif
+#else
+ ui32TASigRegCount = sizeof(asTASigRegList)/sizeof(RGXFW_REGISTER_LIST);
+ ui323DSigRegCount = sizeof(as3DSigRegList)/sizeof(RGXFW_REGISTER_LIST);
+#if defined(RGX_FEATURE_RAY_TRACING)
+ bRayTracing = IMG_TRUE;
+#endif
+#endif
+
+
+
+ SRVINITPDumpComment(hServices, "Signature TA registers: ");
+ for (i = 0; i < ui32TASigRegCount; i++)
+ {
+ if (asTASigRegList[i].ui16IndirectRegNum != 0)
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asTASigRegList[i].ui16RegNum, asTASigRegList[i].ui16IndirectRegNum,
+ asTASigRegList[i].ui16IndirectStartVal, asTASigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X", asTASigRegList[i].ui16RegNum);
+ }
+ }
+
+ SRVINITPDumpComment(hServices, "Signature 3D registers: ");
+ for (i = 0; i < ui323DSigRegCount; i++)
+ {
+ if (as3DSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ as3DSigRegList[i].ui16RegNum, as3DSigRegList[i].ui16IndirectRegNum,
+ as3DSigRegList[i].ui16IndirectStartVal, as3DSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X", as3DSigRegList[i].ui16RegNum);
+ }
+ }
+
+ if(bRayTracing)
+ {
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(SUPPORT_KERNEL_SRVINIT)
+ SRVINITPDumpComment(hServices, "Signature RTU registers: ");
+ for (i = 0; i < sizeof(asRTUSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+ {
+ if (asRTUSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asRTUSigRegList[i].ui16RegNum, asRTUSigRegList[i].ui16IndirectRegNum,
+ asRTUSigRegList[i].ui16IndirectStartVal, asRTUSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X", asRTUSigRegList[i].ui16RegNum);
+ }
+ }
+
+ SRVINITPDumpComment(hServices, "Signature SHG registers: ");
+ for (i = 0; i < sizeof(asSHGSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+ {
+ if (asSHGSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asSHGSigRegList[i].ui16RegNum, asSHGSigRegList[i].ui16IndirectRegNum,
+ asSHGSigRegList[i].ui16IndirectStartVal, asSHGSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ SRVINITPDumpComment(hServices, " * 0x%8.8X", asSHGSigRegList[i].ui16RegNum);
+ }
+ }
+#endif
+ }
+
+ }
+#endif /* defined(PDUMP) */
+
+ eError = PVRSRV_OK;
+
+cleanup:
+ return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services script routines used at initialisation time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsrvinit_script.h"
+#include "srvinit_osfunc.h"
+#include "pvr_debug.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function OutOfScriptSpace
+
+ @Description Checks for script space failure
+
+ @Input psScript
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL OutOfScriptSpace(RGX_SCRIPT_BUILD *psScript)
+{
+ if (psScript->ui32CurrComm >= psScript->ui32MaxLen)
+ {
+ psScript->bOutOfSpace = IMG_TRUE;
+ }
+
+ return psScript->bOutOfSpace;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function NextScriptCommand
+
+ @Description Gets next script command to populate
+
+ @Input psScript
+
+ @Return      RGX_INIT_COMMAND* : Next free command, or NULL if out of space
+
+******************************************************************************/
+static RGX_INIT_COMMAND* NextScriptCommand(RGX_SCRIPT_BUILD *psScript)
+{
+ if (OutOfScriptSpace(psScript))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "NextScriptCommand: Out of space for commands (%d)",
+ psScript->ui32MaxLen));
+ return NULL;
+ }
+
+ return &psScript->psCommands[psScript->ui32CurrComm++];
+}
+
+
+IMG_BOOL ScriptWriteRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sWriteHWReg.eOp = RGX_INIT_OP_WRITE_HW_REG;
+ psComm->sWriteHWReg.ui32Offset = ui32Offset;
+ psComm->sWriteHWReg.ui32Value = ui32Value;
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPoll64RGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT64 ui64PollMask)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sPoll64HWReg.eOp = RGX_INIT_OP_POLL_64_HW_REG;
+ psComm->sPoll64HWReg.ui32Offset = ui32Offset;
+ psComm->sPoll64HWReg.ui64Value = ui64Value;
+ psComm->sPoll64HWReg.ui64Mask = ui64PollMask;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PollMask)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sPollHWReg.eOp = RGX_INIT_OP_POLL_HW_REG;
+ psComm->sPollHWReg.ui32Offset = ui32Offset;
+ psComm->sPollHWReg.ui32Value = ui32Value;
+ psComm->sPollHWReg.ui32Mask = ui32PollMask;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptDBGReadRGXReg(RGX_SCRIPT_BUILD *psScript,
+ RGX_INIT_OPERATION eOp,
+ IMG_UINT32 ui32Offset,
+ IMG_CHAR *pszName)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ PVR_ASSERT(strlen(pszName) < RGX_DBG_CMD_NAME_SIZE);
+
+ if (psComm != NULL)
+ {
+ PVR_ASSERT((eOp == RGX_INIT_OP_DBG_READ32_HW_REG) ||
+ (eOp == RGX_INIT_OP_DBG_READ64_HW_REG));
+
+ psComm->sDBGReadHWReg.eOp = eOp;
+ psComm->sDBGReadHWReg.ui32Offset = ui32Offset;
+
+ strcpy(&psComm->sDBGReadHWReg.aszName[0], pszName);
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptDBGCalc(RGX_SCRIPT_BUILD *psScript,
+ RGX_INIT_OPERATION eOp,
+ IMG_UINT32 ui32Offset1,
+ IMG_UINT32 ui32Offset2,
+ IMG_UINT32 ui32Offset3,
+ IMG_CHAR *pszName)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ PVR_ASSERT(strlen(pszName) < RGX_DBG_CMD_NAME_SIZE);
+
+ if (psComm != NULL)
+ {
+ PVR_ASSERT(eOp == RGX_INIT_OP_DBG_CALC);
+
+ psComm->sDBGCalc.eOp = eOp;
+ psComm->sDBGCalc.ui32Offset1 = ui32Offset1;
+ psComm->sDBGCalc.ui32Offset2 = ui32Offset2;
+ psComm->sDBGCalc.ui32Offset3 = ui32Offset3;
+ strcpy(&psComm->sDBGCalc.aszName[0], pszName);
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+IMG_BOOL ScriptWriteRGXRegPDUMPOnly(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sPDumpHWReg.eOp = RGX_INIT_OP_PDUMP_HW_REG;
+ psComm->sPDumpHWReg.ui32Offset = ui32Offset;
+ psComm->sPDumpHWReg.ui32Value = ui32Value;
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptPrepareReadMetaRegThroughSP
+
+ @Description Add script entries for reading a reg through Meta slave port
+
+ @Input psScript
+ @Input ui32RegAddr
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptPrepareReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_BOOL bCmdAdded = IMG_FALSE;
+
+ /* Wait for Slave Port to be Ready */
+ bCmdAdded = ScriptPollRGXReg(psScript,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (!bCmdAdded) return IMG_FALSE;
+
+ /* Issue a Read */
+ bCmdAdded = ScriptWriteRGXReg(psScript,
+ RGX_CR_META_SP_MSLVCTRL0,
+ ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+ if (!bCmdAdded) return IMG_FALSE;
+
+ /* Wait for Slave Port to be Ready: read complete */
+ bCmdAdded = ScriptPollRGXReg(psScript,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+
+ return bCmdAdded;
+}
+
+
+IMG_BOOL ScriptDBGReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_CHAR *pszName)
+{
+ IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+ /* Issue a Read */
+ bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32RegAddr);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ /* Read the value */
+ bCmdsAdded = ScriptDBGReadRGXReg(psScript,
+ RGX_INIT_OP_DBG_READ32_HW_REG,
+ RGX_CR_META_SP_MSLVDATAX,
+ pszName);
+
+ return bCmdsAdded;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptCondPollRGXReg
+
+ @Description Sets up a script entry for a conditional register poll
+
+ @Input psScript
+ @Input ui32CondOffset
+ @Input ui32CondValue
+ @Input ui32CondPollMask
+ @Input ui32Offset
+ @Input ui32Value
+ @Input ui32PollMask
+
+ @Return      IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32CondOffset,
+ IMG_UINT32 ui32CondValue,
+ IMG_UINT32 ui32CondPollMask,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PollMask)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sCondPollHWReg.eOp = RGX_INIT_OP_COND_POLL_HW_REG;
+ psComm->sCondPollHWReg.ui32CondOffset = ui32CondOffset;
+ psComm->sCondPollHWReg.ui32CondValue = ui32CondValue;
+ psComm->sCondPollHWReg.ui32CondMask = ui32CondPollMask;
+ psComm->sCondPollHWReg.ui32Offset = ui32Offset;
+ psComm->sCondPollHWReg.ui32Value = ui32Value;
+ psComm->sCondPollHWReg.ui32Mask = ui32PollMask;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptMetaRegCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32MetaRegAddr,
+ IMG_UINT32 ui32MetaRegValue,
+ IMG_UINT32 ui32MetaRegMask,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask)
+{
+ IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+ /* Issue a Read */
+ bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32MetaRegAddr);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ /* Read the value */
+ bCmdsAdded = ScriptCondPollRGXReg(psScript,
+ RGX_CR_META_SP_MSLVDATAX,
+ ui32MetaRegValue,
+ ui32MetaRegMask,
+ ui32RegAddr,
+ ui32RegValue,
+ ui32RegMask);
+
+ return bCmdsAdded;
+}
+
+
+IMG_BOOL ScriptWriteMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue)
+{
+ IMG_BOOL bCmdAdded = IMG_FALSE;
+
+ /* Wait for Slave Port to be Ready */
+ bCmdAdded = ScriptPollRGXReg(psScript,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+ RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (!bCmdAdded) return IMG_FALSE;
+
+ /* Issue a Write */
+ bCmdAdded = ScriptWriteRGXReg(psScript,
+ RGX_CR_META_SP_MSLVCTRL0,
+ ui32RegAddr);
+ if (!bCmdAdded) return IMG_FALSE;
+
+ bCmdAdded = ScriptWriteRGXReg(psScript,
+ RGX_CR_META_SP_MSLVDATAT,
+ ui32RegValue);
+
+ /* Wait for complete to be done on the next attempt to read/write */
+
+ return bCmdAdded;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptInsertLoopPoint
+
+ @Description Inserts a loop point in the startup script
+
+ @Input psScript
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptInsertLoopPoint(RGX_SCRIPT_BUILD *psScript)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->eOp = RGX_INIT_OP_LOOP_POINT;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptConditionalBranchOnReg
+
+ @Description Conditionally branches back to the last loop point in the script.
+ Condition is satisfied by the contents of a register
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui32Value
+ @Input ui32Mask
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptConditionalBranchOnReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->eOp = RGX_INIT_OP_COND_BRANCH;
+ psComm->sConditionalBranchPoint.ui32Offset = ui32Offset;
+ psComm->sConditionalBranchPoint.ui32Value = ui32Value;
+ psComm->sConditionalBranchPoint.ui32Mask = ui32Mask;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPollMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32PollValue,
+ IMG_UINT32 ui32PollMask)
+{
+ IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+ bCmdsAdded = ScriptInsertLoopPoint(psScript);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32Offset);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ bCmdsAdded = ScriptConditionalBranchOnReg(psScript,
+ RGX_CR_META_SP_MSLVDATAX,
+ ui32PollValue,
+ ui32PollMask);
+ return bCmdsAdded;
+}
+
+
+IMG_BOOL ScriptDBGReadMetaCoreReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_CHAR *pszName)
+{
+ IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+ /* Core Read Ready? */
+ bCmdsAdded = ScriptPollMetaRegThroughSP(psScript,
+ META_CR_TXUXXRXRQ_OFFSET,
+ META_CR_TXUXXRXRQ_DREADY_BIT,
+ META_CR_TXUXXRXRQ_DREADY_BIT);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ /* Set the reg we are interested in reading */
+ bCmdsAdded = ScriptWriteMetaRegThroughSP(psScript,
+ META_CR_TXUXXRXRQ_OFFSET,
+ ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ /* Core Read Done? */
+ bCmdsAdded = ScriptPollMetaRegThroughSP(psScript,
+ META_CR_TXUXXRXRQ_OFFSET,
+ META_CR_TXUXXRXRQ_DREADY_BIT,
+ META_CR_TXUXXRXRQ_DREADY_BIT);
+ if (!bCmdsAdded) return IMG_FALSE;
+
+ /* Read the value */
+ bCmdsAdded = ScriptDBGReadMetaRegThroughSP(psScript, META_CR_TXUXXRXDT_OFFSET, pszName);
+
+ return bCmdsAdded;
+}
+#endif /* RGX_FEATURE_META */
+
+
+IMG_BOOL ScriptDBGString(RGX_SCRIPT_BUILD *psScript,
+ const IMG_CHAR *aszString)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->sDBGString.eOp = RGX_INIT_OP_DBG_STRING;
+ /* Copy with truncation: the source string may be longer than the command buffer */
+ strncpy(psComm->sDBGString.aszString, aszString, sizeof(psComm->sDBGString.aszString) - 1);
+ psComm->sDBGString.aszString[sizeof(psComm->sDBGString.aszString) - 1] = '\0';
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptHalt(RGX_SCRIPT_BUILD *psScript)
+{
+ RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+ if (psComm != NULL)
+ {
+ psComm->eOp = RGX_INIT_OP_HALT;
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+
+/******************************************************************************
+ End of file (rgxsrvinit_script.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Header for Services script routines used at initialisation time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares the routines used to build the RGX initialisation
+ script.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __RGXSRVINIT_SCRIPT_H__
+#define __RGXSRVINIT_SCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "rgxscript.h"
+#include "rgx_firmware_processor.h"
+#include "rgxdefs_km.h"
+
+
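+/* A script is built by appending RGX_INIT_COMMAND entries to psCommands.
+ * Each Script* helper below returns IMG_TRUE when its command(s) were added
+ * and IMG_FALSE once no space is left in the script (tracked by bOutOfSpace).
+ */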
+typedef struct _RGX_SCRIPT_BUILD
+{
+ IMG_UINT32 ui32MaxLen;
+ IMG_UINT32 ui32CurrComm;
+ IMG_BOOL bOutOfSpace;
+ RGX_INIT_COMMAND *psCommands;
+} RGX_SCRIPT_BUILD;
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptWriteRGXReg
+
+ @Description Sets up a script entry for register write
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui32Value
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptPoll64RGXReg
+
+ @Description Sets up a script entry for a 64-bit register poll
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui64Value
+ @Input ui64PollMask
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPoll64RGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT64 ui64PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptPollRGXReg
+
+ @Description Sets up a script entry for register poll
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui32Value
+ @Input ui32PollMask
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptDBGReadRGXReg
+
+ @Description Sets up a script entry for a debug register read
+
+ @Input psScript
+ @Input eOp
+ @Input ui32Offset
+ @Input pszName
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadRGXReg(RGX_SCRIPT_BUILD *psScript,
+ RGX_INIT_OPERATION eOp,
+ IMG_UINT32 ui32Offset,
+ IMG_CHAR *pszName);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptDBGCalc
+
+ @Description Sets up a script entry for a debug calculation
+
+ @Input psScript
+ @Input eOp
+ @Input ui32Offset1
+ @Input ui32Offset2
+ @Input ui32Offset3
+ @Input pszName
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGCalc(RGX_SCRIPT_BUILD *psScript,
+ RGX_INIT_OPERATION eOp,
+ IMG_UINT32 ui32Offset1,
+ IMG_UINT32 ui32Offset2,
+ IMG_UINT32 ui32Offset3,
+ IMG_CHAR *pszName);
+
+
+#if defined(RGX_FEATURE_META) || defined(SUPPORT_KERNEL_SRVINIT)
+/*!
+*******************************************************************************
+
+ @Function ScriptWriteRGXRegPDUMPOnly
+
+ @Description Sets up a script entry for a PDUMP-only register write
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui32Value
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteRGXRegPDUMPOnly(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptDBGReadMetaRegThroughSP
+
+ @Description Add script entries for reading a reg through Meta slave port
+
+ @Input psScript
+ @Input ui32RegAddr
+ @Input pszName
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_CHAR *pszName);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptMetaRegCondPollRGXReg
+
+ @Description Adds script entries for conditionally polling an RGX register
+ based on a register value read through the Meta slave port
+
+ @Input psScript
+ @Input ui32MetaRegAddr
+ @Input ui32MetaRegValue
+ @Input ui32MetaRegMask
+ @Input ui32RegAddr
+ @Input ui32RegValue
+ @Input ui32RegMask
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptMetaRegCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32MetaRegAddr,
+ IMG_UINT32 ui32MetaRegValue,
+ IMG_UINT32 ui32MetaRegMask,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptWriteMetaRegThroughSP
+
+ @Description Add script entries for writing a reg through Meta slave port
+
+ @Input psScript
+ @Input ui32RegAddr
+ @Input ui32RegValue
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptPollMetaRegThroughSP
+
+ @Description Adds script entries for polling a Meta (Garten) register
+ through the slave port
+
+ @Input psScript
+ @Input ui32Offset
+ @Input ui32PollValue
+ @Input ui32PollMask
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPollMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32PollValue,
+ IMG_UINT32 ui32PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function ScriptDBGReadMetaCoreReg
+
+ @Description Adds script entries for reading a Meta core register through
+ the Meta slave port
+
+ @Input psScript
+ @Input ui32RegAddr
+ @Input pszName
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadMetaCoreReg(RGX_SCRIPT_BUILD *psScript,
+ IMG_UINT32 ui32RegAddr,
+ IMG_CHAR *pszName);
+#endif /* RGX_FEATURE_META || SUPPORT_KERNEL_SRVINIT */
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptDBGString
+
+ @Description Adds a debug print to the script
+
+ @Input psScript
+ @Input aszString
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+
+IMG_INTERNAL
+IMG_BOOL ScriptDBGString(RGX_SCRIPT_BUILD *psScript,
+ const IMG_CHAR *aszString);
+
+
+/*!
+*******************************************************************************
+
+ @Function ScriptHalt
+
+ @Description Add a cmd to finish the script
+
+ @Input psScript
+
+ @Return IMG_BOOL IMG_TRUE if the halt command was added, IMG_FALSE if the
+ script ran out of space
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptHalt(RGX_SCRIPT_BUILD *psScript);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGXSRVINIT_SCRIPT_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific start/stop routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific start/stop routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer_km.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+
+#if defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#include "rgxdevice.h"
+#endif
+
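+/* When defined, the SoC is taken to preserve the ordering of writes to the
+ * same address, so the SLC PSG hazard check in __RGXInitSLC is only enabled
+ * where an ERN explicitly requires it.
+ */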
+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING
+
+
+#if !defined(FIX_HW_BRN_37453)
+/*!
+*******************************************************************************
+
+ @Function RGXEnableClocks
+
+ @Description Enable RGX Clocks
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXEnableClocks(const void *hPrivate)
+{
+ RGXCommentLogPower(hPrivate, "RGX clock: use default (automatic clock gating)");
+}
+#endif
+
+
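+/* META slave port access: wait for MSLVCTRL1 to report READY and
+ * GBLPORT_IDLE, write the target register address to MSLVCTRL0 (with RD_EN
+ * set for reads), then transfer the data through MSLVDATAT (writes) or
+ * MSLVDATAX (reads).
+ */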
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Issue a Write */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32* ui32RegValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Issue a Read */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+#if !defined(NO_HARDWARE)
+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+ *ui32RegValue = 0xFFFFFFFF;
+#endif
+
+ return eError;
+}
+
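+/* Write a META core register indirectly through the slave port: the value is
+ * placed in TXUXXRXDT, the unit/register selector is written to TXUXXRXRQ
+ * with RDnWR cleared to request a write, and DREADY is polled to confirm the
+ * transfer has completed.
+ */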
+static PVRSRV_ERROR RGXWriteMetaCoreRegThroughSP(const void *hPrivate,
+ IMG_UINT32 ui32CoreReg,
+ IMG_UINT32 ui32Value)
+{
+ IMG_UINT32 i = 0;
+
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+ do
+ {
+ RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+ } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+ if (i >= 1000)
+ {
+ RGXCommentLogPower(hPrivate, "RGXWriteMetaCoreRegThroughSP: Timeout");
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+ PVRSRV_ERROR eError;
+
+ /* Give privilege to debug and slave port */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+ /* Point Meta to the bootloader address, global (uncached) range */
+ eError = RGXWriteMetaCoreRegThroughSP(hPrivate,
+ PC_ACCESS(0),
+ RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXCommentLogPower(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+ return eError;
+ }
+
+ /* Enable minim encoding */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+ /* Enable Meta thread */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitMetaProcWrapper
+
+ @Description Configures the hardware wrapper of the META processor
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+ IMG_UINT64 ui64GartenConfig;
+
+ /* Set Garten IDLE to META idle and set the Garten Wrapper BIF fence address */
+
+ /* Garten IDLE bit controlled by META */
+ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+ /* The fence addr is set at the fw init sequence */
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ /* Set PC = 0 for fences */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK;
+ ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT;
+
+ if (!RGXDeviceHasErnBrnPower(hPrivate, FIX_HW_BRN_51281_BIT_MASK))
+ {
+ /* Ensure the META fences go all the way to external memory */
+ ui64GartenConfig |= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN; /* SLC Coherent 1 */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK; /* SLC Persistence 0 */
+ }
+ }
+ else
+ {
+ /* Set PC = 0 for fences */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+ ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+ /* Set SLC DM=META */
+ ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_DM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+ }
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Configure META wrapper");
+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitMipsProcWrapper
+
+ @Description Configures the hardware wrapper of the MIPS processor
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitMipsProcWrapper(const void *hPrivate)
+{
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Configure MIPS wrapper");
+
+ /*
+ * MIPS wrapper (registers transaction ID and ISA mode) setup
+ */
+
+ RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr);
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Write wrapper config register");
+ RGXMIPSWrapperConfig(hPrivate,
+ RGX_CR_MIPS_WRAPPER_CONFIG,
+ sPhyAddr.uiAddr,
+ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN,
+ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+
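+ /* The MIPS firmware fetches its boot, code, data and exception regions from
+ * fixed physical input addresses; each remap below redirects one of those
+ * regions to the firmware allocation acquired for it.
+ */
+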
+ /*
+ * Boot remap setup
+ */
+
+ RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Do not mark accesses to a FW code remap region as DRM accesses */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Write boot remap registers");
+ RGXBootRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /*
+ * Data remap setup
+ */
+
+ RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Remapped data in non-secure memory */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Write data remap registers");
+ RGXDataRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG1,
+ RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /*
+ * Code remap setup
+ */
+
+ RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Do not mark accesses to a FW code remap region as DRM accesses */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Write exceptions remap registers");
+ RGXCodeRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG1,
+ RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /*
+ * Trampoline remap setup
+ */
+
+ RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr);
+ ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Remapped data in non-secure memory */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Write trampoline remap registers");
+ RGXTrampolineRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1,
+ sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG2,
+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /* Garten IDLE bit controlled by MIPS */
+ RGXCommentLogPower(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS");
+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ /* Turn on the EJTAG probe (only useful when the driver is live) */
+ RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function __RGXInitSLC
+
+ @Description Initialise RGX SLC
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void __RGXInitSLC(const void *hPrivate)
+{
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK))
+ {
+ IMG_UINT32 ui32Reg;
+ IMG_UINT32 ui32RegVal;
+
+ if (RGXDeviceHasErnBrnPower(hPrivate, HW_ERN_51468_BIT_MASK))
+ {
+ /*
+ * SLC control
+ */
+ ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+ ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH |
+ RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+ }
+ else
+ {
+ /*
+ * SLC control
+ */
+ ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+ ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH |
+ RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+
+ /*
+ * SLC scramble bits
+ */
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Count=0;
+ IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate);
+ IMG_UINT64 aui64ScrambleValues[4];
+ IMG_UINT32 aui32ScrambleRegs[] = {
+ RGX_CR_SLC3_SCRAMBLE,
+ RGX_CR_SLC3_SCRAMBLE2,
+ RGX_CR_SLC3_SCRAMBLE3,
+ RGX_CR_SLC3_SCRAMBLE4
+ };
+
+ if (2 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566);
+ aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a);
+ ui32Count = 4;
+ }
+ else if (4 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1);
+ aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478);
+ ui32Count = 4;
+
+ }
+ else if (8 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447);
+ ui32Count = 3;
+ }
+
+ for (i = 0; i < ui32Count; i++)
+ {
+ IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+ IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+ RGXWriteReg64(hPrivate, ui32Reg, ui64Value);
+ }
+ }
+ }
+
+ if (RGXDeviceHasErnBrnPower(hPrivate, HW_ERN_45914_BIT_MASK))
+ {
+ /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+ RGXCommentLogPower(hPrivate, "Disable forced SLC coherency");
+ RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0);
+ }
+ }
+ else
+ {
+ IMG_UINT32 ui32Reg;
+ IMG_UINT32 ui32RegVal;
+
+#if defined(FIX_HW_BRN_36492)
+ /* Because the WA for this BRN forbids using SLC reset, need to inval it instead */
+ RGXCommentLogPower(hPrivate, "Invalidate the SLC");
+ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+
+ /* Poll for completion */
+ RGXPollReg32(hPrivate, RGX_CR_SLC_STATUS0, 0x0, RGX_CR_SLC_STATUS0_MASKFULL);
+#endif
+
+ /*
+ * SLC Bypass control
+ */
+ ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+ ui32RegVal = 0;
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_SLCSIZE8_BIT_MASK) ||
+ RGXDeviceHasErnBrnPower(hPrivate, FIX_HW_BRN_61450_BIT_MASK))
+ {
+ RGXCommentLogPower(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF");
+ ui32RegVal |= RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN | RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN;
+ }
+
+ if (RGXGetDeviceSLCSize(hPrivate) < (128*1024))
+ {
+ /* Bypass SLC for textures if the SLC size is less than 128kB */
+ RGXCommentLogPower(hPrivate, "Bypass SLC for TPU");
+ ui32RegVal |= RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN;
+ }
+
+ if (ui32RegVal != 0)
+ {
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+ }
+
+ /*
+ * SLC Misc control.
+ *
+ * Note: This is a 64bit register and we set only the lower 32bits leaving the top
+ * 32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default.
+ */
+ ui32Reg = RGX_CR_SLC_CTRL_MISC;
+ ui32RegVal = (RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+ RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+ if (RGXDeviceHasErnBrnPower(hPrivate, FIX_HW_BRN_60084_BIT_MASK))
+ {
+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING)
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#else
+ if (RGXDeviceHasErnBrnPower(hPrivate, HW_ERN_61389_BIT_MASK))
+ {
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+ }
+#endif
+ }
+ /* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+ if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
+ {
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+ }
+
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitBIF
+
+ @Description Initialise RGX BIF
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
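+ /* The kernel MMU page catalogue is only programmed here for non-MIPS
+ * firmware processors; the MIPS path below only needs the trusted-device
+ * setup.
+ */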
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ IMG_DEV_PHYADDR sPCAddr;
+
+ /*
+ * Acquire the address of the Kernel Page Catalogue.
+ */
+ RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
+
+ /*
+ * Write the kernel catalogue base.
+ */
+ RGXCommentLogPower(hPrivate, "RGX firmware MMU Page Catalogue");
+
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_SLC_VIVT_BIT_MASK))
+ {
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC64(hPrivate,
+ RGX_CR_BIF_CAT_BASE0,
+ RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT,
+ RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT,
+ ((sPCAddr.uiAddr
+ >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
+ & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+ /*
+ * Trusted Firmware boot
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLogPower(hPrivate, "RGXInitBIF: Trusted Device enabled");
+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+ }
+ else
+ {
+ IMG_UINT32 uiPCAddr;
+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+ & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+ /* Set the mapping context */
+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 0);
+
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Set-up MMU ID 1 mapping to the same PC used by MMU ID 0 */
+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 1);
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+#endif /* SUPPORT_TRUSTED_DEVICE */
+ }
+ }
+ else
+ {
+ /*
+ * Trusted Firmware boot
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLogPower(hPrivate, "RGXInitBIF: Trusted Device enabled");
+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXAXIACELiteInit
+
+ @Description Initialise AXI-ACE Lite interface
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXAXIACELiteInit(const void *hPrivate)
+{
+ IMG_UINT32 ui32RegAddr;
+ IMG_UINT64 ui64RegVal;
+
+ ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+ /* Setup AXI-ACE config. Set everything to outer cache */
+ ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+ (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+ if (RGXDeviceHasErnBrnPower(hPrivate, FIX_HW_BRN_42321_BIT_MASK))
+ {
+ ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT);
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_SLC_VIVT_BIT_MASK))
+ {
+ RGXCommentLogPower(hPrivate, "OSID 0 and 1 are trusted");
+ ui64RegVal |= IMG_UINT64_C(0xFC)
+ << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+ }
+#endif
+
+ RGXCommentLogPower(hPrivate, "Init AXI-ACE interface");
+ RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
+}
+
+
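+/*
+ * Bring-up sequence: soft-reset the GPU while keeping the firmware processor
+ * in reset, enable clocks, initialise the SLC, configure the META or MIPS
+ * wrapper and the BIF, then release the firmware processor and, for META
+ * slave boot, start the firmware through the slave port.
+ */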
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bDoFWSlaveBoot;
+ IMG_CHAR *pcRGXFW_PROCESSOR;
+ IMG_BOOL bMetaFW;
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_MIPS_BIT_MASK))
+ {
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+ bMetaFW = IMG_FALSE;
+ bDoFWSlaveBoot = IMG_FALSE;
+ }
+ else
+ {
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+ bMetaFW = IMG_TRUE;
+ bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+ }
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK))
+ {
+ /* Disable the default sys_bus_secure protection to perform minimal setup */
+ RGXCommentLogPower(hPrivate, "RGXStart: Disable sys_bus_secure");
+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+ }
+
+#if defined(FIX_HW_BRN_37453)
+ /* Force all clocks on */
+ RGXCommentLogPower(hPrivate, "RGXStart: force all clocks on");
+ RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_ON);
+#endif
+
+#if defined(SUPPORT_SHARED_SLC) && !defined(FIX_HW_BRN_36492)
+ /* When the SLC is shared, the SLC reset is performed by the System layer when calling
+ * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+ * soft_resetting it here. If HW_BRN_36492, the bit is already masked out.
+ */
+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+ RGXCommentLogPower(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ /* Set RGX in soft-reset */
+ RGXCommentLogPower(hPrivate, "RGXStart: soft reset assert step 1");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+
+ RGXCommentLogPower(hPrivate, "RGXStart: soft reset assert step 2");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset but META/MIPS */
+ RGXCommentLogPower(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0);
+
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ RGXCommentLogPower(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+ }
+ else
+ {
+ /* Set RGX in soft-reset */
+ RGXCommentLogPower(hPrivate, "RGXStart: soft reset everything");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+
+ /* Take Rascal and Dust out of reset */
+ RGXCommentLogPower(hPrivate, "RGXStart: Rascal and Dust out of reset");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset but META/MIPS */
+ RGXCommentLogPower(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+ }
+
+
+#if !defined(FIX_HW_BRN_37453)
+ /* Enable clocks */
+ RGXEnableClocks(hPrivate);
+#endif
+
+ /*
+ * Initialise SLC.
+ */
+#if !defined(SUPPORT_SHARED_SLC)
+ __RGXInitSLC(hPrivate);
+#endif
+
+ if (bMetaFW)
+ {
+ if (bDoFWSlaveBoot)
+ {
+ /* Configure META to Slave boot */
+ RGXCommentLogPower(hPrivate, "RGXStart: META Slave boot");
+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+
+ }
+ else
+ {
+ /* Configure META to Master boot */
+ RGXCommentLogPower(hPrivate, "RGXStart: META Master boot");
+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+ }
+ }
+
+ /*
+ * Initialise Firmware wrapper
+ */
+ if (bMetaFW)
+ {
+ RGXInitMetaProcWrapper(hPrivate);
+ }
+ else
+ {
+ RGXInitMipsProcWrapper(hPrivate);
+ }
+
+ if (RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_AXI_ACELITE_BIT_MASK))
+ {
+ /* We must init the AXI-ACE interface before 1st BIF transaction */
+ RGXAXIACELiteInit(hPrivate);
+ }
+
+ /*
+ * Initialise BIF.
+ */
+ RGXInitBIF(hPrivate);
+
+ RGXCommentLogPower(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+
+ /* Need to wait for at least 16 cycles before taking META/MIPS out of reset ... */
+ RGXWaitCycles(hPrivate, 32, 3);
+
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* ... and afterwards */
+ RGXWaitCycles(hPrivate, 32, 3);
+
+#if defined(FIX_HW_BRN_37453)
+ /* We rely on the 32 clk sleep from above */
+
+ /* Switch clocks back to auto */
+ RGXCommentLogPower(hPrivate, "RGXStart: set clocks back to auto");
+ RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_AUTO);
+#endif
+
+ if (bMetaFW && bDoFWSlaveBoot)
+ {
+ eError = RGXIOCoherencyTest(hPrivate);
+ if (eError != PVRSRV_OK) return eError;
+
+ RGXCommentLogPower(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+ eError = RGXStartFirmware(hPrivate);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ else
+ {
+ RGXCommentLogPower(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+ }
+
+ /* Enable Sys Bus security */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLogPower(hPrivate, "RGXStart: Enable sys_bus_secure");
+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+ return eError;
+}
+
+static INLINE void ClearIRQStatusRegister(const void *hPrivate, IMG_BOOL bMetaFW)
+{
+ IMG_UINT32 ui32IRQClearReg;
+ IMG_UINT32 ui32IRQClearMask;
+
+ if (bMetaFW)
+ {
+ ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+ }
+ else
+ {
+ ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+ ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+ }
+
+ RGXWriteReg32(hPrivate, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+ RGXWriteReg32(hPrivate, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+}
+
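+/*
+ * Power-down sequence: clear any pending firmware IRQ, wait for the GPU
+ * blocks (Sidekick/Jones, SLC, BIF) to report idle, detach the MTS DMs from
+ * the firmware threads and, for PDump captures, stop the META threads before
+ * the final Garten idle check.
+ */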
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+ IMG_BOOL bMetaFW = !RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+ PVRSRV_ERROR eError;
+
+ ClearIRQStatusRegister(hPrivate, bMetaFW);
+
+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+ }
+
+ if (eError != PVRSRV_OK) return eError;
+
+
+#if !defined(SUPPORT_SHARED_SLC)
+ /* Wait for SLC to signal IDLE */
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC_IDLE,
+ RGX_CR_SLC_IDLE_MASKFULL,
+ RGX_CR_SLC_IDLE_MASKFULL);
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC3_IDLE,
+ RGX_CR_SLC3_IDLE_MASKFULL,
+ RGX_CR_SLC3_IDLE_MASKFULL);
+ }
+#endif /* SUPPORT_SHARED_SLC */
+ if (eError != PVRSRV_OK) return eError;
+
+
+ /* Unset MTS DM association with threads */
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
+
+
+#if defined(PDUMP)
+ if (bMetaFW)
+ {
+ /* Disabling threads is only required for pdumps to stop the fw gracefully */
+
+ /* Disable thread 0 */
+ eError = RGXWriteMetaRegThroughSP(hPrivate,
+ META_CR_T0ENABLE_OFFSET,
+ ~META_CR_TXENABLE_ENABLE_BIT);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Disable thread 1 */
+ eError = RGXWriteMetaRegThroughSP(hPrivate,
+ META_CR_T1ENABLE_OFFSET,
+ ~META_CR_TXENABLE_ENABLE_BIT);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Clear down any irq raised by META (done after disabling the FW
+ * threads to avoid a race condition).
+ * This is only really needed for PDump captures, but we do it when running live as well.
+ */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+
+ /* Wait for the Slave Port to finish all the transactions */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+#endif
+
+
+ /* Extra Idle checks */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIF_STATUS_MMU,
+ 0,
+ RGX_CR_BIF_STATUS_MMU_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIFPM_STATUS_MMU,
+ 0,
+ RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) &&
+ !RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIF_READS_EXT_STATUS,
+ 0,
+ RGX_CR_BIF_READS_EXT_STATUS_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIFPM_READS_EXT_STATUS,
+ 0,
+ RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ {
+ IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
+ eError = RGXPollReg64(hPrivate,
+ RGX_CR_SLC_STATUS1,
+ 0,
+ ui64SLCMask);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+ if (4 == RGXGetDeviceSLCBanks(hPrivate))
+ {
+ eError = RGXPollReg64(hPrivate,
+ RGX_CR_SLC_STATUS2,
+ 0,
+ RGX_CR_SLC_STATUS2_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+#if !defined(SUPPORT_SHARED_SLC)
+ /* Wait for SLC to signal IDLE */
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC_IDLE,
+ RGX_CR_SLC_IDLE_MASKFULL,
+ RGX_CR_SLC_IDLE_MASKFULL);
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC3_IDLE,
+ RGX_CR_SLC3_IDLE_MASKFULL,
+ RGX_CR_SLC3_IDLE_MASKFULL);
+ }
+#endif /* SUPPORT_SHARED_SLC */
+ if (eError != PVRSRV_OK) return eError;
+
+
+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+ }
+ else
+ {
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+ }
+ }
+
+ if (eError != PVRSRV_OK) return eError;
+
+
+ if (bMetaFW)
+ {
+ IMG_UINT32 ui32RegValue;
+
+ eError = RGXReadMetaRegThroughSP(hPrivate,
+ META_CR_TxVECINT_BHALT,
+ &ui32RegValue);
+ if (eError != PVRSRV_OK) return eError;
+
+ if ((ui32RegValue & 0xFFFFFFFFU) == 0x0)
+ {
+ /* Wait for Sidekick/Jones to signal IDLE including
+ * the Garten Wrapper if there is no debugger attached
+ * (TxVECINT_BHALT = 0x0) */
+ if (!RGXDeviceHasFeaturePower(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_GARTEN_EN,
+ RGX_CR_JONES_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ }
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+ return eError;
+}
+
+
+/*
+ * RGXInitSLC
+ */
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvPowerParams;
+
+ if (psDeviceNode == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psDevInfo = psDeviceNode->pvDevice;
+ pvPowerParams = &psDevInfo->sPowerParams;
+
+#if !defined(FIX_HW_BRN_36492)
+ /* reset the SLC */
+ RGXCommentLogPower(pvPowerParams, "RGXInitSLC: soft reset SLC");
+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset */
+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0);
+#endif
+
+ __RGXInitSLC(pvPowerParams);
+
+ return PVRSRV_OK;
+}
+#endif
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX start/stop header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX start/stop functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXSTARTSTOP_H__)
+#define __RGXSTARTSTOP_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer_km.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer_km.h"
+
+/*!
+*******************************************************************************
+
+ @Function RGXStart
+
+ @Description Perform GPU reset and initialisation
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStart(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXStop
+
+ @Description Stop Rogue in preparation for power down
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStop(const void *hPrivate);
+
+#endif /* __RGXSTARTSTOP_H__ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX TA/3D routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX TA/3D routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#include <stddef.h>
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "ri_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+typedef struct _DEVMEM_REF_LOOKUP_
+{
+ IMG_UINT32 ui32ZSBufferID;
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+} DEVMEM_REF_LOOKUP;
+
+typedef struct _DEVMEM_FREELIST_LOOKUP_
+{
+ IMG_UINT32 ui32FreeListID;
+ RGX_FREELIST *psFreeList;
+} DEVMEM_FREELIST_LOOKUP;
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
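+/* Server-side render context: owns the firmware render context memory and
+ * the per-DM (TA and 3D) common contexts, plus the sync address lists used
+ * when building kicks.
+ */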
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRenderContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ RGX_SERVER_RC_TA_DATA sTAData;
+ RGX_SERVER_RC_3D_DATA s3DData;
+ IMG_UINT32 ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE (1 << 0)
+#define RC_CLEANUP_3D_COMPLETE (1 << 1)
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListTAFence;
+ SYNC_ADDR_LIST sSyncAddrListTAUpdate;
+ SYNC_ADDR_LIST sSyncAddrList3DFence;
+ SYNC_ADDR_LIST sSyncAddrList3DUpdate;
+ ATOMIC_T hJobId;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WORKEST_HOST_DATA sWorkEstData;
+#endif
+};
+
+
+#if !defined(NO_HARDWARE)
+static
+#ifdef __GNUC__
+ __attribute__((noreturn))
+#endif
+void sleep_for_ever(void)
+{
+#if defined(__KLOCWORK__) // klocworks would report an infinite loop because of while(1).
+ PVR_ASSERT(0);
+#else
+ while(1)
+ {
+ OSSleepms(~0); // sleep the maximum amount of time possible
+ }
+#endif
+}
+#endif
+
+
+/*
+ Static functions used by render context code
+*/
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psTAData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_TA,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has, so we can free its resources */
+#if defined(DEBUG)
+ /* Log the number of TA context stores which occurred */
+ {
+ RGXFWIF_TACTX_STATE *psFWTAState;
+
+ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+ (void**)&psFWTAState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+ __FUNCTION__, eError));
+ }
+ else
+ {
+ /* Release the CPU virt addr */
+ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+ }
+ }
+#endif
+ FWCommonContextFree(psTAData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+ psTAData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps3DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_3D,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has, so we can free its resources */
+#if defined(DEBUG)
+ /* Log the number of 3D context stores which occurred */
+ {
+ RGXFWIF_3DCTX_STATE *psFW3DState;
+
+ eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+ (void**)&psFW3DState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+ __FUNCTION__, eError));
+ }
+ else
+ {
+ /* Release the CPU virt addr */
+ DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+ }
+ }
+#endif
+
+ FWCommonContextFree(ps3DData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+ ps3DData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+ RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ PVRSRV_ERROR eError;
+
+ eError = PMRDumpPageList(psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Error (%u) printing pmr %p", eError, psPMRNode->psPMR));
+ }
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016llx",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32FreelistID,
+ psFreeList->ui64FreelistChecksum));
+
+ /* Dump Init FreeList page list */
+ PVR_LOG((" Initial Memory block"));
+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+ {
+ _RGXDumpPMRPageList(psNode);
+ }
+
+ /* Dump Grow FreeList page list */
+ PVR_LOG((" Grow Memory blocks"));
+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+ {
+ _RGXDumpPMRPageList(psNode);
+ }
+
+ return IMG_TRUE;
+}
+
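+/*
+ * Tell the firmware about a host-side change to a freelist's size. The
+ * command carries both the change (ui32DeltaSize) and the resulting
+ * absolute size (ui32NewSize, taken from ui32CurrentFLPages, which the
+ * caller is expected to have updated already). For example (illustrative
+ * numbers only), growing a freelist that now holds 512 pages by 128 pages
+ * sends DeltaSize = 128 and NewSize = 512.
+ */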
+static PVRSRV_ERROR _UpdateFwFreelistSize(RGX_FREELIST *psFreeList,
+ IMG_BOOL bGrow,
+ IMG_UINT32 ui32DeltaSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sGPCCBCmd;
+
+ sGPCCBCmd.eCmdType = (bGrow) ? RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE : RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE;
+ sGPCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32DeltaSize;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewSize = psFreeList->ui32CurrentFLPages;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: freelist [FWAddr=0x%08x] has 0x%08x pages",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32CurrentFLPages));
+
+ /* Submit command to the firmware. */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psFreeList->psDevInfo,
+ RGXFWIF_DM_GP,
+ &sGPCCBCmd,
+ sizeof(sGPCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_UpdateFwFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
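+/*
+ * Consistency check for a freelist: the page addresses are read back from
+ * the freelist PMR, scanned for duplicate entries, and folded into a packed
+ * checksum of the form ((XOR of all entries) << 32) | (sum of all entries).
+ * As an illustration (made-up values), two entries 0x1000 and 0x3000 give
+ * ADD = 0x4000 and XOR = 0x2000, i.e. a checksum of 0x0000200000004000.
+ * On NO_HARDWARE builds the check is skipped entirely.
+ */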
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumOfPagesToCheck,
+ IMG_UINT64 ui64ExpectedCheckSum,
+ IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+ /* No checksum needed as we have all information in the pdumps */
+ PVR_UNREFERENCED_PARAMETER(psFreeList);
+ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+ *pui64CalculatedCheckSum = 0;
+#else
+ PVRSRV_ERROR eError;
+ size_t uiNumBytes;
+ IMG_UINT8* pui8Buffer;
+ IMG_UINT32* pui32Buffer;
+ IMG_UINT32 ui32CheckSumAdd = 0;
+ IMG_UINT32 ui32CheckSumXor = 0;
+ IMG_UINT32 ui32Entry;
+ IMG_UINT32 ui32Entry2;
+ IMG_BOOL bFreelistBad = IMG_FALSE;
+
+ *pui64CalculatedCheckSum = 0;
+
+ /* Allocate Buffer of the size of the freelist */
+ pui8Buffer = OSAllocMem(psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ if (pui8Buffer == NULL)
+ {
+ PVR_LOG(("_CheckFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ /* Copy freelist content into Buffer */
+ eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+ psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32),
+ pui8Buffer,
+ psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32),
+ &uiNumBytes);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(pui8Buffer);
+ PVR_LOG(("_CheckFreelist: Failed to get freelist data for freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ PVR_ASSERT(uiNumBytes == psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ PVR_ASSERT(ui32NumOfPagesToCheck <= psFreeList->ui32CurrentFLPages);
+
+ /* Generate checksum */
+ pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+ for (ui32Entry = 0; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+ {
+ ui32CheckSumAdd += pui32Buffer[ui32Entry];
+ ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+ /* Check for double entries */
+ for (ui32Entry2 = 0; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+ {
+ if ((ui32Entry != ui32Entry2) &&
+ (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]))
+ {
+ PVR_LOG(("_CheckFreelist: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ pui32Buffer[ui32Entry2],
+ ui32Entry,
+ ui32Entry2,
+ psFreeList->ui32CurrentFLPages));
+ bFreelistBad = IMG_TRUE;
+ }
+ }
+ }
+
+ OSFreeMem(pui8Buffer);
+
+ /* Check the calculated checksum against the expected checksum... */
+ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+ {
+ PVR_LOG(("_CheckFreelist: Checksum mismatch for freelist %p! Expected 0x%016llx calculated 0x%016llx",
+ psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+ bFreelistBad = IMG_TRUE;
+ }
+
+ if (bFreelistBad)
+ {
+ PVR_LOG(("_CheckFreelist: Sleeping for ever!"));
+ sleep_for_ever();
+// PVR_ASSERT(!bFreelistBad);
+ }
+#endif
+}
+
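+/*
+ * Grow a freelist by appending a new physically backed memory block. The
+ * freelist PMR is populated backwards from its end: the new block's page
+ * addresses are written starting at page index
+ * (ui32MaxFLPages - ui32CurrentFLPages - ui32NumPages). With illustrative
+ * numbers only, MaxFLPages = 1024, CurrentFLPages = 256 and a grow of
+ * 128 pages writes 128 entries starting at index 640, i.e. at byte offset
+ * uiFreeListPMROffset + 640 * sizeof(IMG_UINT32).
+ */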
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages,
+ PDLLIST_NODE pListHeader)
+{
+ RGX_PMR_NODE *psPMRNode;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32MappingTable = 0;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_DEVMEM_SIZE_T uistartPage;
+ PVRSRV_ERROR eError;
+ const IMG_CHAR * pszAllocName = "Free List";
+
+ /* Are we allowed to grow ? */
+ if ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) < ui32NumPages)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: grow by %u pages denied. Max PB size reached (current pages %u/%u)",
+ psFreeList,
+ ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32MaxFLPages));
+ return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+ }
+
+ /* Allocate kernel memory block structure */
+ psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+ if (psPMRNode == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ */
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+ psPMRNode->ui32NumPages = ui32NumPages;
+ psPMRNode->psFreeList = psFreeList;
+
+ /* Allocate Memory Block */
+ PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages);
+ uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+ eError = PhysmemNewRamBackedPMR(NULL,
+ psFreeList->psDevInfo->psDeviceNode,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+ OSStringLength(pszAllocName) + 1,
+ pszAllocName,
+ &psPMRNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to allocate PB block of size: 0x%016llX",
+ (IMG_UINT64)uiSize));
+ goto ErrorBlockAlloc;
+ }
+
+ /* Zeroing physical pages pointed by the PMR */
+ if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+ {
+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGrowFreeList: Failed to zero PMR %p of freelist %p with Error %d",
+ psPMRNode->psPMR,
+ psFreeList,
+ eError));
+ PVR_ASSERT(0);
+ }
+ }
+
+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+ uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+ uiOffset = psFreeList->uiFreeListPMROffset + (uistartPage * sizeof(IMG_UINT32));
+
+#if defined(PVR_RI_DEBUG)
+
+ eError = RIWritePMREntryKM(psPMRNode->psPMR,
+ OSStringNLength(pszAllocName, RI_MAX_TEXT_LEN),
+ pszAllocName,
+ uiSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: call to RIWritePMREntryKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+
+ /* Attach RI information */
+ eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+ OSStringNLength(pszAllocName, RI_MAX_TEXT_LEN),
+ pszAllocName,
+ 0,
+ uiSize,
+ uiSize,
+ IMG_FALSE,
+ IMG_FALSE,
+ &psPMRNode->hRIHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: call to RIWriteMEMDESCEntryKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ /* write Freelist with Memory Block physical addresses */
+ eError = PMRWritePMPageList(
+ /* Target PMR, offset, and length */
+ psFreeList->psFreeListPMR,
+ uiOffset,
+ uiLength,
+ /* Referenced PMR, and "page" granularity */
+ psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ &psPMRNode->psPageList);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to write pages of Node %p",
+ psPMRNode));
+ goto ErrorPopulateFreelist;
+ }
+
+ /* Add the new block to the memory block list; it must be linked in here, otherwise the freelist population won't work */
+ dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+ /* Update number of available pages */
+ psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+ /* Update statistics */
+ if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+ {
+ psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+ }
+
+ if (psFreeList->bCheckFreelist)
+ {
+ /* We can only do a freelist check if the list is full (e.g. at initial creation time) */
+ if (psFreeList->ui32CurrentFLPages == ui32NumPages)
+ {
+ IMG_UINT64 ui64Dummy;
+ _CheckFreelist(psFreeList, ui32NumPages, psFreeList->ui64FreelistChecksum, &ui64Dummy);
+ }
+ }
+
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Freelist [%p]: grow by %u pages (current pages %u/%u)",
+ psFreeList,
+ ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32MaxFLPages));
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+ErrorPopulateFreelist:
+ PMRUnrefPMR(psPMRNode->psPMR);
+
+ErrorBlockAlloc:
+ OSFreeMem(psPMRNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+
+}
+
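+/*
+ * Remove one memory block (the node at the head of the supplied list) from
+ * a freelist and release its PMR. Each call shrinks the freelist by exactly
+ * one block, which is why RGXDestroyFreeList below loops until the grow
+ * list is empty before removing the initial block.
+ */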
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+ RGX_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode;
+ RGX_PMR_NODE *psPMRNode;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32OldValue;
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages value
+ */
+ PVR_ASSERT(pListHeader);
+ PVR_ASSERT(psFreeList);
+ PVR_ASSERT(psFreeList->psDevInfo);
+ PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+ /* Get node from head of list and remove it */
+ psNode = dllist_get_next_node(pListHeader);
+ if (psNode)
+ {
+ dllist_remove_node(psNode);
+
+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ PVR_ASSERT(psPMRNode);
+ PVR_ASSERT(psPMRNode->psPMR);
+ PVR_ASSERT(psPMRNode->psFreeList);
+
+ /* remove block from freelist list */
+
+ /* Unwrite Freelist with Memory Block physical addresses */
+ eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXRemoveBlockFromFreeListKM: Failed to unwrite pages of Node %p",
+ psPMRNode));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+#if defined(PVR_RI_DEBUG)
+
+ if (psPMRNode->hRIHandle)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to RIDeleteMEMDESCEntryKM failed (eError=%d)", __func__, eError));
+ }
+ }
+
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ /* Free PMR (We should be the only one that holds a ref on the PMR) */
+ eError = PMRUnrefPMR(psPMRNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXRemoveBlockFromFreeListKM: Failed to free PB block %p (error %u)",
+ psPMRNode->psPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* update available pages in freelist */
+ ui32OldValue = psFreeList->ui32CurrentFLPages;
+ psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages;
+
+ /* check underflow */
+ PVR_ASSERT(ui32OldValue > psFreeList->ui32CurrentFLPages);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+ psFreeList,
+ psPMRNode->ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32MaxFLPages));
+
+ OSFreeMem(psPMRNode);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+ psFreeList,
+ psFreeList->ui32InitFLPages));
+ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+ }
+
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_FREELIST *psFreeList = NULL;
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+ if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+ {
+ psFreeList = psThisFreeList;
+ break;
+ }
+ }
+
+ OSLockRelease(psDevInfo->hLockFreeList);
+ return psFreeList;
+}
+
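+/*
+ * Host-side handler for a firmware freelist grow request. After attempting
+ * the grow, a FREELIST_GROW_UPDATE command is sent back on the 3D data
+ * master as feedback; on failure the command carries ui32DeltaSize = 0 so
+ * the firmware knows that no pages were added.
+ */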
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID)
+{
+ RGX_FREELIST *psFreeList = NULL;
+ RGXFWIF_KCCB_CMD s3DCCBCmd;
+ IMG_UINT32 ui32GrowValue;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+
+ if (psFreeList)
+ {
+ /* Try to grow the freelist */
+ eError = RGXGrowFreeList(psFreeList,
+ psFreeList->ui32GrowFLPages,
+ &psFreeList->sMemoryBlockHead);
+ if (eError == PVRSRV_OK)
+ {
+ /* Grow successful, return the grow size */
+ ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+ psFreeList->ui32NumGrowReqByFW++;
+
+ #if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(0,
+ 1, /* Add 1 to the appropriate counter (Requests by FW) */
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+ #endif
+
+ }
+ else
+ {
+ /* Grow failed */
+ ui32GrowValue = 0;
+ PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p failed (error %u)",
+ psFreeList,
+ eError));
+ }
+
+ /* send feedback */
+ s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+ s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32GrowValue;
+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewSize = psFreeList->ui32CurrentFLPages;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_3D,
+ &s3DCCBCmd,
+ sizeof(s3DCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ /* Should never happen */
+ PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+}
+
+static void _RGXCheckFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGX_FREELIST *psFreeList;
+ RGX_PMR_NODE *psPMRNode;
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_UINT32 ui32StartPage;
+
+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ psFreeList = psPMRNode->psFreeList;
+ PVR_ASSERT(psFreeList);
+ psDevInfo = psFreeList->psDevInfo;
+ PVR_ASSERT(psDevInfo);
+
+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+ ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+ uiOffset = psFreeList->uiFreeListPMROffset + (ui32StartPage * sizeof(IMG_UINT32));
+
+ PMRUnwritePMPageList(psPMRNode->psPageList);
+ psPMRNode->psPageList = NULL;
+ eError = PMRWritePMPageList(
+ /* Target PMR, offset, and length */
+ psFreeList->psFreeListPMR,
+ uiOffset,
+ uiLength,
+ /* Referenced PMR, and "page" granularity */
+ psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ &psPMRNode->psPageList);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Error (%u) writing FL 0x%08x", eError, (IMG_UINT32)psFreeList->ui32FreelistID));
+ }
+
+ /* Zeroing physical pages pointed by the reconstructed freelist */
+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+ {
+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXCheckFreeListReconstruction: Failed to zero PMR %p of freelist %p with Error %d",
+ psPMRNode->psPMR,
+ psFreeList,
+ eError));
+ PVR_ASSERT(0);
+ }
+ }
+
+ psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
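+/*
+ * Rebuild a freelist after a hardware recovery: every memory block is
+ * re-written into the freelist PMR and the firmware-visible state is reset
+ * to a fully populated stack. For instance (illustrative value), a freelist
+ * whose ui32CurrentPages is 256 ends up with ui32CurrentStackTop = 255, no
+ * allocated pages and an incremented HWR counter.
+ */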
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGXFWIF_FREELIST *psFWFreeList;
+ PVRSRV_ERROR eError;
+
+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+ /* Do the FreeList Reconstruction */
+ psFreeList->ui32CurrentFLPages = 0;
+
+ /* Reconstructing Init FreeList pages */
+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+ {
+ _RGXCheckFreeListReconstruction(psNode);
+ }
+
+ /* Reconstructing Grow FreeList pages */
+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+ {
+ _RGXCheckFreeListReconstruction(psNode);
+ }
+
+ /* Reset the firmware freelist structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+ psFWFreeList->ui32AllocatedPageCount = 0;
+ psFWFreeList->ui32AllocatedMMUPageCount = 0;
+ psFWFreeList->ui32HWRCounter++;
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ /* Check the Freelist checksum if required (as the list is fully populated) */
+ if (psFreeList->bCheckFreelist)
+ {
+ IMG_UINT64 ui64CheckSum;
+
+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+ }
+
+ return eError;
+}
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistsCount,
+ IMG_UINT32 *paui32Freelists)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32Loop;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+
+ PVR_ASSERT(psDevInfo != NULL);
+ PVR_ASSERT(ui32FreelistsCount <= (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS));
+
+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+ /*
+ * Initialise the response command (in case we don't find a freelist ID)...
+ */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+ }
+
+ /*
+ * The list of freelists we have been given for reconstruction will
+ * consist of local and global freelists (maybe MMU as well). Any
+ * local freelists will have their global list specified as well.
+ * However there may be other local freelists not listed, which are
+ * going to have their global freelist reconstructed. Therefore we
+ * have to find those freelists as well meaning we will have to
+ * iterate the entire list of freelists to find which must be reconstructed.
+ */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+ IMG_BOOL bReconstruct = IMG_FALSE;
+
+ /*
+ * Check if this freelist needs to be reconstructed (was it requested
+ * or was its global freelist requested)...
+ */
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID ||
+ paui32Freelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+ {
+ bReconstruct = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (bReconstruct)
+ {
+ eError = RGXReconstructFreeList(psFreeList);
+ if (eError == PVRSRV_OK)
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+ {
+ /* Reconstruction of this requested freelist was successful... */
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+ break;
+ }
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Reconstructing of FreeList %p failed (error %u)",
+ psFreeList,
+ eError));
+ }
+ }
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ /* Check that all freelists were found and reconstructed... */
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+ }
+
+ /* send feedback */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
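+/*
+ * Each HWRTDataSet takes a reference on all RGXFW_MAX_FREELISTS freelists it
+ * is created with; the references are dropped again in RGXDestroyHWRTData.
+ * This is why RGXDestroyFreeList returns PVRSRV_ERROR_RETRY while the
+ * freelist's ui32RefCount is still non-zero.
+ */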
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 psRenderTarget, /* FIXME this should not be IMG_UINT32 */
+ IMG_DEV_VIRTADDR psPMMListDevVAddr,
+ IMG_DEV_VIRTADDR psVFPPageTableAddr,
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS],
+ RGX_RTDATA_CLEANUP_DATA **ppsCleanupData,
+ DEVMEM_MEMDESC **ppsRTACtlMemDesc,
+ IMG_UINT32 ui32PPPScreen,
+ IMG_UINT32 ui32PPPGridOffset,
+ IMG_UINT64 ui64PPPMultiSampleCtl,
+ IMG_UINT32 ui32TPCStride,
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr,
+ IMG_UINT32 ui32TPCSize,
+ IMG_UINT32 ui32TEScreen,
+ IMG_UINT32 ui32TEAA,
+ IMG_UINT32 ui32TEMTILE1,
+ IMG_UINT32 ui32TEMTILE2,
+ IMG_UINT32 ui32MTileStride,
+ IMG_UINT32 ui32ISPMergeLowerX,
+ IMG_UINT32 ui32ISPMergeLowerY,
+ IMG_UINT32 ui32ISPMergeUpperX,
+ IMG_UINT32 ui32ISPMergeUpperY,
+ IMG_UINT32 ui32ISPMergeScaleX,
+ IMG_UINT32 ui32ISPMergeScaleY,
+ IMG_UINT16 ui16MaxRTs,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWRTData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ RGXFWIF_HWRTDATA *psHWRTData;
+ RGXFWIF_RTA_CTL *psRTACtl;
+ IMG_UINT32 ui32Loop;
+ RGX_RTDATA_CLEANUP_DATA *psTmpCleanup;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* Prepare cleanup struct */
+ psTmpCleanup = OSAllocZMem(sizeof(*psTmpCleanup));
+ if (psTmpCleanup == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto AllocError;
+ }
+
+ *ppsCleanupData = psTmpCleanup;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTmpCleanup->psCleanupSync,
+ "HWRTData cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto SyncAlloc;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ /*
+ * This FW RT-Data is only mapped into the kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine
+ * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_HWRTDATA),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwHWRTData",
+ ppsMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateHWRTData: DevmemAllocate for RGX_FWIF_HWRTDATA failed"));
+ goto FWRTDataAllocateError;
+ }
+
+ psTmpCleanup->psDeviceNode = psDeviceNode;
+ psTmpCleanup->psFWHWRTDataMemDesc = *ppsMemDesc;
+
+ RGXSetFirmwareAddress(&pFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+ *puiHWRTData = pFirmwareAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psHWRTData);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+ /* FIXME: MList is something that the PM writes physical addresses to,
+ * so ideally it is best allocated in the kernel */
+ psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+ psHWRTData->psParentRenderTarget.ui32Addr = psRenderTarget;
+ #if defined(SUPPORT_VFP)
+ psHWRTData->sVFPPageTableAddr = psVFPPageTableAddr;
+ #endif
+
+ psHWRTData->ui32PPPScreen = ui32PPPScreen;
+ psHWRTData->ui32PPPGridOffset = ui32PPPGridOffset;
+ psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl;
+ psHWRTData->ui32TPCStride = ui32TPCStride;
+ psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr;
+ psHWRTData->ui32TPCSize = ui32TPCSize;
+ psHWRTData->ui32TEScreen = ui32TEScreen;
+ psHWRTData->ui32TEAA = ui32TEAA;
+ psHWRTData->ui32TEMTILE1 = ui32TEMTILE1;
+ psHWRTData->ui32TEMTILE2 = ui32TEMTILE2;
+ psHWRTData->ui32MTileStride = ui32MTileStride;
+ psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+ psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+ psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+ psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+ psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+ psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ psTmpCleanup->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+ psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount++;
+ psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psTmpCleanup->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+ /* Invalid initial snapshot value; the snapshot is always taken during the
+ * first kick and hence the value gets replaced then anyway, so it is safe
+ * to set it to 0.
+ */
+ psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ PDUMPCOMMENT("Allocate RGXFW RTA control");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_RTA_CTL),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwRTAControl",
+ ppsRTACtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate RGX RTA control (%u)",
+ eError));
+ goto FWRTAAllocateError;
+ }
+ psTmpCleanup->psRTACtlMemDesc = *ppsRTACtlMemDesc;
+ RGXSetFirmwareAddress(&psHWRTData->psRTACtl,
+ *ppsRTACtlMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsRTACtlMemDesc, (void **)&psRTACtl);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTACpuMapError);
+ psRTACtl->ui32RenderTargetIndex = 0;
+ psRTACtl->ui32ActiveRenderTargets = 0;
+
+ if (ui16MaxRTs > 1)
+ {
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for shadow render target cache");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui16MaxRTs * sizeof(IMG_UINT32),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED|
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwShadowRTCache",
+ &psTmpCleanup->psRTArrayMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u)",
+ ui16MaxRTs, eError));
+ goto FWAllocateRTArryError;
+ }
+
+ RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets,
+ psTmpCleanup->psRTArrayMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for tracking renders accumulation");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui16MaxRTs * sizeof(IMG_UINT32),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED|
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwRendersAccumulation",
+ &psTmpCleanup->psRendersAccArrayMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u) (renders accumulation)",
+ ui16MaxRTs, eError));
+ goto FWAllocateRTAccArryError;
+ }
+
+ RGXSetFirmwareAddress(&psRTACtl->sNumRenders,
+ psTmpCleanup->psRendersAccArrayMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ psRTACtl->ui16MaxRTs = ui16MaxRTs;
+ }
+ else
+ {
+ psRTACtl->sValidRenderTargets.ui32Addr = 0;
+ psRTACtl->sNumRenders.ui32Addr = 0;
+ psRTACtl->ui16MaxRTs = 1;
+ }
+
+ PDUMPCOMMENT("Dump HWRTData 0x%08X", *puiHWRTData);
+ DevmemPDumpLoadMem(*ppsMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+ PDUMPCOMMENT("Dump RTACtl");
+ DevmemPDumpLoadMem(*ppsRTACtlMemDesc, 0, sizeof(*psRTACtl), PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+ DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+ return PVRSRV_OK;
+
+FWAllocateRTAccArryError:
+ DevmemFwFree(psDevInfo, psTmpCleanup->psRTArrayMemDesc);
+FWAllocateRTArryError:
+ DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+FWRTACpuMapError:
+ RGXUnsetFirmwareAddress(*ppsRTACtlMemDesc);
+ DevmemFwFree(psDevInfo, *ppsRTACtlMemDesc);
+FWRTAAllocateError:
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ PVR_ASSERT(psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+ psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount--;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+ DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+FWRTDataCpuMapError:
+ RGXUnsetFirmwareAddress(*ppsMemDesc);
+ DevmemFwFree(psDevInfo, *ppsMemDesc);
+FWRTDataAllocateError:
+ SyncPrimFree(psTmpCleanup->psCleanupSync);
+SyncAlloc:
+ *ppsCleanupData = NULL;
+ OSFreeMem(psTmpCleanup);
+
+AllocError:
+ return eError;
+}
+
+/* Destroy HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ PRGXFWIF_HWRTDATA psHWRTData;
+ IMG_UINT32 ui32Loop;
+
+ PVR_ASSERT(psCleanupData);
+
+ RGXSetFirmwareAddress(&psHWRTData, psCleanupData->psFWHWRTDataMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+ /* Cleanup HWRTData in TA */
+ eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+ psHWRTData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_TA);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+
+ psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+ /* Cleanup HWRTData in 3D */
+ eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+ psHWRTData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_3D);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+
+ /* If we got here then TA and 3D operations on this RTData have finished */
+ if (psCleanupData->psRTACtlMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRTACtlMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRTACtlMemDesc);
+ }
+
+ RGXUnsetFirmwareAddress(psCleanupData->psFWHWRTDataMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psFWHWRTDataMemDesc);
+
+ if (psCleanupData->psRTArrayMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRTArrayMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRTArrayMemDesc);
+ }
+
+ if (psCleanupData->psRendersAccArrayMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRendersAccArrayMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRendersAccArrayMemDesc);
+ }
+
+ SyncPrimFree(psCleanupData->psCleanupSync);
+
+ /* decrease freelist refcount */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ PVR_ASSERT(psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+ psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount--;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ OSFreeMem(psCleanupData);
+
+ return PVRSRV_OK;
+}
+
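+/*
+ * Create a freelist. The firmware state is initialised so that the initial
+ * pages occupy the top end of the freelist buffer: with illustrative numbers
+ * only, MaxFLPages = 1024 and InitFLPages = 256 give
+ * ui32CurrentStackTop = 255 and
+ * ui64CurrentDevVAddr = sFreeListDevVAddr + (1024 - 256) * sizeof(IMG_UINT32),
+ * i.e. 3072 bytes past the start of the freelist.
+ */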
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MaxFLPages,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ RGX_FREELIST *psGlobalFreeList,
+ IMG_BOOL bCheckFreelist,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ PMR *psFreeListPMR,
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset,
+ RGX_FREELIST **ppsFreeList)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_FREELIST *psFWFreeList;
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGX_FREELIST *psFreeList;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Allocate kernel freelist struct */
+ psFreeList = OSAllocZMem(sizeof(*psFreeList));
+ if (psFreeList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psFreeList->psCleanupSync,
+ "ta3d free list cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateFreeList: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto SyncAlloc;
+ }
+
+ /*
+ * This FW FreeList context is only mapped into the kernel for initialisation
+ * and reconstruction (at other times it is not mapped and only used by
+ * the FW). Therefore the GPU cache doesn't need coherency, and write-combine
+ * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWFreeList),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwFreeList",
+ &psFWFreelistMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: DevmemAllocate for RGXFWIF_FREELIST failed"));
+ goto FWFreeListAlloc;
+ }
+
+ /* Initialise host data structures */
+ psFreeList->psDevInfo = psDevInfo;
+ psFreeList->psFreeListPMR = psFreeListPMR;
+ psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+ psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+ RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+ psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+ psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+ psFreeList->ui32InitFLPages = ui32InitFLPages;
+ psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+ psFreeList->ui32CurrentFLPages = 0;
+ psFreeList->ui64FreelistChecksum = 0;
+ psFreeList->ui32RefCount = 0;
+ psFreeList->bCheckFreelist = bCheckFreelist;
+ dllist_init(&psFreeList->sMemoryBlockHead);
+ dllist_init(&psFreeList->sMemoryBlockInitHead);
+
+
+ /* Add to list of freelists */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+
+ /* Initialise FW data structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap);
+
+ psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+ psFWFreeList->ui32CurrentPages = ui32InitFLPages;
+ psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+ psFWFreeList->ui32CurrentStackTop = ui32InitFLPages - 1;
+ psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
+ psFWFreeList->ui64CurrentDevVAddr = sFreeListDevVAddr.uiAddr + ((ui32MaxFLPages - ui32InitFLPages) * sizeof(IMG_UINT32));
+ psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+ psFWFreeList->bGrowPending = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, Max FL base address 0x%016llx, Init FL base address 0x%016llx",
+ psFreeList,
+ ui32MaxFLPages,
+ ui32InitFLPages,
+ sFreeListDevVAddr.uiAddr,
+ psFWFreeList->psFreeListDevVAddr.uiAddr));
+
+ PDUMPCOMMENT("Dump FW FreeList");
+ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+ /*
+ * Separate dump of the Freelist's number of pages and stack pointer.
+ * This makes it easy to modify the PB size in the out2.txt files.
+ */
+ PDUMPCOMMENT("FreeList TotalPages");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+ psFWFreeList->ui32CurrentPages,
+ PDUMP_FLAGS_CONTINUOUS);
+ PDUMPCOMMENT("FreeList StackPointer");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+ psFWFreeList->ui32CurrentStackTop,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+ /* Add initial PB block */
+ eError = RGXGrowFreeList(psFreeList,
+ ui32InitFLPages,
+ &psFreeList->sMemoryBlockInitHead);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXCreateFreeList: failed to allocate initial memory block for free list 0x%016llx (error = %u)",
+ sFreeListDevVAddr.uiAddr,
+ eError));
+ goto FWFreeListCpuMap;
+ }
+ psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App) */
+ 0,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+#endif
+
+ /* return values */
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+
+FWFreeListCpuMap:
+ /* Remove freelists from list */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+ DevmemFwFree(psDevInfo, psFWFreelistMemDesc);
+
+FWFreeListAlloc:
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+SyncAlloc:
+ OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ RGXDestroyFreeList
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psFreeList);
+
+ if (psFreeList->ui32RefCount != 0)
+ {
+ /* Freelist still busy */
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Freelist is not in use => start firmware cleanup */
+ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+ psFreeList->sFreeListFWDevVAddr,
+ psFreeList->psCleanupSync);
+ if (eError != PVRSRV_OK)
+ {
+ /* Can happen if the firmware took too long to handle the cleanup request,
+ * or if the SLC flushes didn't go through (due to some GPU lockup) */
+ return eError;
+ }
+
+ /* Remove FreeList from linked list before we destroy it... */
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ if (psFreeList->bCheckFreelist)
+ {
+ RGXFWIF_FREELIST *psFWFreeList;
+ IMG_UINT64 ui32CurrentStackTop;
+ IMG_UINT64 ui64CheckSum;
+
+ /* Get the current stack pointer for this free list */
+ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
+ {
+ /* Do consistency tests (as the list is fully populated) */
+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+ }
+ else
+ {
+ /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+ _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+ }
+ }
+
+ /* Destroy FW structures */
+ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+ DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+ /* Remove grow shrink blocks */
+ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+ {
+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* Remove initial PB block */
+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* consistency checks */
+ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ /* free Freelist */
+ OSFreeMem(psFreeList);
+
+ return eError;
+}
+
+
+
+/*
+ RGXAddBlockToFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if we have reference to freelist's PMR */
+ if (psFreeList->psFreeListPMR == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Freelist is not configured for grow"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* grow freelist */
+ eError = RGXGrowFreeList(psFreeList,
+ ui32NumPages,
+ &psFreeList->sMemoryBlockHead);
+ if (eError == PVRSRV_OK)
+ {
+ /* update freelist data in firmware */
+ _UpdateFwFreelistSize(psFreeList, IMG_TRUE, ui32NumPages);
+
+ psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ 0,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+ }
+
+ return eError;
+}
+
+/*
+ RGXRemoveBlockFromFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList)
+{
+ PVRSRV_ERROR eError;
+
+ /*
+ * Make sure the pages part of the memory block are not in use anymore.
+ * Instruct the firmware to update the freelist pointers accordingly.
+ */
+
+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead,
+ psFreeList);
+
+ return eError;
+}
+
+
+/*
+ RGXCreateRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR psVHeapTableDevVAddr,
+ RGX_RT_CLEANUP_DATA **ppsCleanupData,
+ IMG_UINT32 *sRenderTargetFWDevVAddr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_RENDER_TARGET *psRenderTarget;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_RT_CLEANUP_DATA *psCleanupData;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psCleanupData = OSAllocZMem(sizeof(*psCleanupData));
+ if (psCleanupData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ psCleanupData->psDeviceNode = psDeviceNode;
+ /*
+ * This FW render target context is only mapped into the kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine
+ * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psRenderTarget),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwRenderTarget",
+ &psCleanupData->psRenderTargetMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRenderTarget: DevmemAllocate for Render Target failed"));
+ goto err_free;
+ }
+ RGXSetFirmwareAddress(&pFirmwareAddr, psCleanupData->psRenderTargetMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *sRenderTargetFWDevVAddr = pFirmwareAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(psCleanupData->psRenderTargetMemDesc, (void **)&psRenderTarget);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", err_fwalloc);
+
+ psRenderTarget->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+ psRenderTarget->bTACachesNeedZeroing = IMG_FALSE;
+ PDUMPCOMMENT("Dump RenderTarget");
+ DevmemPDumpLoadMem(psCleanupData->psRenderTargetMemDesc, 0, sizeof(*psRenderTarget), PDUMP_FLAGS_CONTINUOUS);
+ DevmemReleaseCpuVirtAddr(psCleanupData->psRenderTargetMemDesc);
+
+ *ppsCleanupData = psCleanupData;
+
+err_out:
+ return eError;
+
+err_free:
+ OSFreeMem(psCleanupData);
+ goto err_out;
+
+err_fwalloc:
+ DevmemFwFree(psDevInfo, psCleanupData->psRenderTargetMemDesc);
+ goto err_free;
+
+}
+
+
+/*
+ RGXDestroyRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psCleanupData->psDeviceNode;
+
+ RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+
+ /*
+ Note:
+ When RT cleanup is available in the FW, call that instead.
+ */
+ /* Flush the SLC before freeing */
+ {
+ RGXFWIF_KCCB_CMD sFlushInvalCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+ eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sFlushInvalCmd,
+ sizeof(sFlushInvalCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: SLC flush and invalidate aborted with error (%u)", eError));
+ }
+ }
+ }
+
+ DevmemFwFree(psDeviceNode->pvDevice, psCleanupData->psRenderTargetMemDesc);
+ OSFreeMem(psCleanupData);
+ return PVRSRV_OK;
+}
+
+/*
+ RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ RGX_ZSBUFFER_DATA **ppsZSBuffer,
+ IMG_UINT32 *pui32ZSBufferFWDevVAddr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_FWZSBUFFER *psFWZSBuffer;
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ DEVMEM_MEMDESC *psFWZSBufferMemDesc;
+ IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+ /* Allocate host data structure */
+ psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+ if (psZSBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup data structure for ZS-Buffer"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocCleanup;
+ }
+
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psZSBuffer->psCleanupSync,
+ "ta3d zs buffer cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /* Populate Host data */
+ psZSBuffer->psDevInfo = psDevInfo;
+ psZSBuffer->psReservation = psReservation;
+ psZSBuffer->psPMR = psPMR;
+ psZSBuffer->uiMapFlags = uiMapFlags;
+ psZSBuffer->ui32RefCount = 0;
+ psZSBuffer->bOnDemand = bOnDemand;
+ if (bOnDemand)
+ {
+ psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+ psZSBuffer->psMapping = NULL;
+
+ OSLockAcquire(psDevInfo->hLockZSBuffer);
+ dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+ OSLockRelease(psDevInfo->hLockZSBuffer);
+ }
+
+ /* Allocate firmware memory for ZS-Buffer. */
+ PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWZSBuffer),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwZSBuffer",
+ &psFWZSBufferMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate firmware ZS-Buffer (%u)", eError));
+ goto ErrorAllocFWZSBuffer;
+ }
+ psZSBuffer->psZSBufferMemDesc = psFWZSBufferMemDesc;
+
+ /* Temporarily map the firmware ZS-Buffer to the kernel. */
+ eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+ (void **)&psFWZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to map firmware ZS-Buffer (%u)", eError));
+ goto ErrorAcquireFWZSBuffer;
+ }
+
+ /* Populate FW ZS-Buffer data structure */
+ psFWZSBuffer->bOnDemand = bOnDemand;
+ psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_ZSBUFFER_UNBACKED : RGXFWIF_ZSBUFFER_BACKED;
+ psFWZSBuffer->ui32ZSBufferID = psZSBuffer->ui32ZSBufferID;
+
+ /* Get firmware address of ZS-Buffer. */
+ RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Dump the ZS-Buffer and the memory content */
+ PDUMPCOMMENT("Dump firmware ZS-Buffer");
+ DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+
+ /* Release address acquired above. */
+ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+ /* define return value */
+ *ppsZSBuffer = psZSBuffer;
+ *pui32ZSBufferFWDevVAddr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+ psZSBuffer,
+ (bOnDemand) ? "On-Demand": "Up-front"));
+
+ psZSBuffer->owner=OSGetCurrentClientProcessIDKM();
+
+ return PVRSRV_OK;
+
+ /* error handling */
+
+ErrorAcquireFWZSBuffer:
+ DevmemFwFree(psDevInfo, psFWZSBufferMemDesc);
+
+ErrorAllocFWZSBuffer:
+ SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ErrorSyncAlloc:
+ OSFreeMem(psZSBuffer);
+
+ErrorAllocCleanup:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psZSBuffer);
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ /* Request ZS Buffer cleanup */
+ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+ psZSBuffer->sZSBufferFWDevVAddr,
+ psZSBuffer->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ /* Free the firmware ZS-Buffer. */
+ RGXUnsetFirmwareAddress(psZSBuffer->psZSBufferMemDesc);
+ DevmemFwFree(psZSBuffer->psDevInfo, psZSBuffer->psZSBufferMemDesc);
+
+ /* Remove Deferred Allocation from list */
+ if (psZSBuffer->bOnDemand)
+ {
+ OSLockAcquire(hLockZSBuffer);
+ PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+ dllist_remove_node(&psZSBuffer->sNode);
+ OSLockRelease(hLockZSBuffer);
+ }
+
+ SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS-Buffer [%p] destroyed",psZSBuffer));
+
+ /* Free ZS-Buffer host data structure */
+ OSFreeMem(psZSBuffer);
+
+ }
+
+ return eError;
+}
+
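+/*
+ * On-demand ZS-Buffer backing is reference counted: the PMR is mapped on
+ * the first backing request (ui32RefCount 0 -> 1) in RGXBackingZSBuffer and
+ * unmapped again when the last reference is dropped (ui32RefCount 1 -> 0)
+ * in RGXUnbackingZSBuffer. Buffers not marked on-demand are backed up-front
+ * and never go through this path.
+ */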
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ if (!psZSBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!psZSBuffer->bOnDemand)
+ {
+ /* Only deferred allocations can be populated */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ OSLockAcquire(hLockZSBuffer);
+
+ if (psZSBuffer->ui32RefCount == 0)
+ {
+ if (psZSBuffer->bOnDemand)
+ {
+ IMG_HANDLE hDevmemHeap;
+
+ PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+ /* Get Heap */
+ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = DevmemIntMapPMR(hDevmemHeap,
+ psZSBuffer->psReservation,
+ psZSBuffer->psPMR,
+ psZSBuffer->uiMapFlags,
+ &psZSBuffer->psMapping);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable populate ZS Buffer [%p, ID=0x%08x] with error %u",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID,
+ eError));
+ OSLockRelease(hLockZSBuffer);
+ return eError;
+
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ }
+ }
+
+ /* Increase refcount */
+ psZSBuffer->ui32RefCount++;
+
+ OSLockRelease(hLockZSBuffer);
+
+ return PVRSRV_OK;
+}
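+
+/*
+ * Note: RGXBackingZSBuffer() and RGXUnbackingZSBuffer() form a refcounted
+ * pair for on-demand (deferred) ZS-Buffers: the first backing request maps
+ * the PMR, later requests only bump ui32RefCount, and the mapping is removed
+ * again once the last reference is dropped. A minimal usage sketch
+ * (hypothetical caller, error handling elided):
+ *
+ *     if (RGXBackingZSBuffer(psZSBuffer) == PVRSRV_OK)
+ *     {
+ *         // ... render with the now-backed buffer ...
+ *         RGXUnbackingZSBuffer(psZSBuffer);
+ *     }
+ */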
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+ RGX_POPULATION **ppsPopulation)
+{
+ RGX_POPULATION *psPopulation;
+ PVRSRV_ERROR eError;
+
+ psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateZSBufferStats(1,0,psZSBuffer->owner);
+#endif
+
+ /* Do the backing */
+ eError = RGXBackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ goto OnErrorBacking;
+ }
+
+ /* Create the handle to the backing */
+ psPopulation = OSAllocMem(sizeof(*psPopulation));
+ if (psPopulation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto OnErrorAlloc;
+ }
+
+ psPopulation->psZSBuffer = psZSBuffer;
+
+ /* return value */
+ *ppsPopulation = psPopulation;
+
+ return PVRSRV_OK;
+
+OnErrorAlloc:
+ RGXUnbackingZSBuffer(psZSBuffer);
+
+OnErrorBacking:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ if (!psZSBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ OSLockAcquire(hLockZSBuffer);
+
+ if (psZSBuffer->bOnDemand)
+ {
+ if (psZSBuffer->ui32RefCount == 1)
+ {
+ PVR_ASSERT(psZSBuffer->psMapping);
+
+ eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to unpopulate ZS Buffer [%p, ID=0x%08x] with error %u",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID,
+ eError));
+ OSLockRelease(hLockZSBuffer);
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ }
+ }
+
+ /* Decrease refcount */
+ psZSBuffer->ui32RefCount--;
+
+ OSLockRelease(hLockZSBuffer);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+ PVRSRV_ERROR eError;
+
+ if (!psPopulation)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSFreeMem(psPopulation);
+
+ return PVRSRV_OK;
+}
+
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+ OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+ dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+ {
+ RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+ if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+ {
+ psZSBuffer = psThisZSBuffer;
+ break;
+ }
+ }
+
+ OSLockRelease(psDevInfo->hLockZSBuffer);
+ return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID)
+{
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ /* scan all deferred allocations */
+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+ if (psZSBuffer)
+ {
+ IMG_BOOL bBackingDone = IMG_TRUE;
+
+ /* Populate ZLS */
+ eError = RGXBackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Populating ZS-Buffer failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+ bBackingDone = IMG_FALSE;
+ }
+
+ /* send confirmation */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateZSBufferStats(0,1,psZSBuffer->owner);
+#endif
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", ui32ZSBufferID));
+ }
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID)
+{
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ /* scan all deferred allocations */
+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+ if (psZSBuffer)
+ {
+ /* Unpopulate ZLS */
+ eError = RGXUnbackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"UnPopulating ZS-Buffer failed failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* send confirmation */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", ui32ZSBufferID));
+ }
+}
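+
+/*
+ * The two request handlers above run when the firmware asks the host to back
+ * or unback a deferred ZS-Buffer: the host does the mapping work under
+ * hLockZSBuffer and then confirms completion by sending an
+ * RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE (or _UNBACKING_UPDATE) command
+ * back through the kernel CCB. The RGXScheduleCommand() retry loop only
+ * guards against a transiently busy KCCB; a persistent failure trips the
+ * PVR_ASSERT above.
+ */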
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RC_TA_DATA *psTAData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TACTX_STATE *psContextState;
+ PVRSRV_ERROR eError;
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_TACTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwTAContextState",
+ &psTAData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_tacontextsuspendalloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+ (void **)&psContextState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to map firmware render context state (%u)",
+ eError));
+ goto fail_suspendcpuvirtacquire;
+ }
+ psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr;
+ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TA,
+ RGXFWIF_DM_TA,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ psTAData->psContextStateMemDesc,
+ RGX_TA_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psTAData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init TA fw common context (%u)",
+ eError));
+ goto fail_tacommoncontext;
+ }
+
+ /*
+ * Dump the FW TA context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the TA context suspend state buffer");
+ DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_TACTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psTAData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_tacommoncontext:
+fail_suspendcpuvirtacquire:
+ DevmemFwFree(psDevInfo, psTAData->psContextStateMemDesc);
+fail_tacontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RC_3D_DATA *ps3DData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_3DCTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "Fw3DContextState",
+ &ps3DData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_3dcontextsuspendalloc;
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_3D,
+ RGXFWIF_DM_3D,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ ps3DData->psContextStateMemDesc,
+ RGX_3D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps3DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init 3D fw common context (%u)",
+ eError));
+ goto fail_3dcommoncontext;
+ }
+
+ /*
+ * Dump the FW 3D context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the 3D context suspend state buffer");
+ DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_3DCTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ ps3DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_3dcommoncontext:
+ DevmemFwFree(psDevInfo, ps3DData->psContextStateMemDesc);
+fail_3dcontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pabyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+
+ /* Prepare cleanup structure */
+ *ppsRenderContext = NULL;
+ psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+ if (psRenderContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRenderContext->psDeviceNode = psDeviceNode;
+
+ /*
+ Create the FW render context, this has the TA and 3D FW common
+ contexts embedded within it
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWRENDERCONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRenderContext",
+ &psRenderContext->psFWRenderContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fwrendercontext;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WorkEstRCInit(&(psRenderContext->sWorkEstData));
+#endif
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRenderContext->psCleanupSync,
+ "ta3d render context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psRenderContext->psFWFrameworkMemDesc,
+ ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
+ pabyFrameworkRegisters,
+ ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+ sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+ eError = _CreateTAContext(psConnection,
+ psDeviceNode,
+ psRenderContext->psFWRenderContextMemDesc,
+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+ psFWMemContextMemDesc,
+ sVDMCallStackAddr,
+ ui32Priority,
+ &sInfo,
+ &psRenderContext->sTAData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tacontext;
+ }
+
+ eError = _Create3DContext(psConnection,
+ psDeviceNode,
+ psRenderContext->psFWRenderContextMemDesc,
+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psRenderContext->s3DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcontext;
+ }
+
+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+ }
+
+ *ppsRenderContext = psRenderContext;
+ return PVRSRV_OK;
+
+fail_3dcontext:
+ _DestroyTAContext(&psRenderContext->sTAData,
+ psDeviceNode,
+ psRenderContext->psCleanupSync);
+fail_tacontext:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ SyncPrimFree(psRenderContext->psCleanupSync);
+fail_syncalloc:
+ DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+fail_fwrendercontext:
+ OSFreeMem(psRenderContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+ /* Remove the node from the list before calling destroy - if destroy
+ * succeeds it will invalidate the node, so it must be re-added if
+ * destroy fails.
+ */
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_remove_node(&(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+ /* Cleanup the TA if we haven't already */
+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+ {
+ eError = _DestroyTAContext(&psRenderContext->sTAData,
+ psRenderContext->psDeviceNode,
+ psRenderContext->psCleanupSync);
+ if (eError == PVRSRV_OK)
+ {
+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+ /* Cleanup the 3D if we haven't already */
+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+ {
+ eError = _Destroy3DContext(&psRenderContext->s3DData,
+ psRenderContext->psDeviceNode,
+ psRenderContext->psCleanupSync);
+ if (eError == PVRSRV_OK)
+ {
+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+ eError));
+ goto e0;
+ }
+
+ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+ /* Check if all of the workload estimation CCB commands for this workload
+ * are read
+ */
+ if(ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ goto e0;
+ }
+#endif
+
+ /*
+ Only if both TA and 3D contexts have been cleaned up can we
+ free the shared resources
+ */
+ if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+ {
+
+ /* Update SPM statistics */
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if (eError == PVRSRV_OK)
+ {
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+ eError));
+ }
+
+ /* Free the framework buffer */
+ DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+
+ /* Free the firmware render context */
+ DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+ /* Free the cleanup sync */
+ SyncPrimFree(psRenderContext->psCleanupSync);
+
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WorkEstRCDeInit(&(psRenderContext->sWorkEstData),
+ psDevInfo);
+#endif
+
+ OSFreeMem(psRenderContext);
+ }
+
+ return PVRSRV_OK;
+
+e0:
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+ return eError;
+}
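+
+/*
+ * Render context destruction is deliberately retry-friendly: if either
+ * _DestroyTAContext() or _Destroy3DContext() fails (typically with
+ * PVRSRV_ERROR_RETRY while the firmware still holds the context), the
+ * context is re-added to the device list and the caller is expected to try
+ * again later. The shared resources (framework buffer, FW render context,
+ * cleanup sync, sync address lists) are only freed once both
+ * RC_CLEANUP_TA_COMPLETE and RC_CLEANUP_3D_COMPLETE have been recorded.
+ */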
+
+
+/* TODO: these used to be locals on the stack, and we managed to blow the
+ * kernel stack. This 46-argument function needs to be sorted out.
+ */
+/* 1 command for the TA */
+static RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[1];
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+static RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[3];
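+
+/*
+ * Making the helper arrays static trades stack usage for re-entrancy:
+ * concurrent render kicks must be serialised by the caller (the bridge layer
+ * is assumed to provide that serialisation), otherwise two kicks could
+ * overwrite each other's helper data.
+ */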
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientTAFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAFenceSyncOffset,
+ IMG_UINT32 *paui32ClientTAFenceValue,
+ IMG_UINT32 ui32ClientTAUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientTAUpdateValue,
+ IMG_UINT32 ui32ServerTASyncPrims,
+ IMG_UINT32 *paui32ServerTASyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerTASyncs,
+ IMG_UINT32 ui32Client3DFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DFenceSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DFenceSyncOffset,
+ IMG_UINT32 *paui32Client3DFenceValue,
+ IMG_UINT32 ui32Client3DUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DUpdateSyncOffset,
+ IMG_UINT32 *paui32Client3DUpdateValue,
+ IMG_UINT32 ui32Server3DSyncPrims,
+ IMG_UINT32 *paui32Server3DSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServer3DSyncs,
+ SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock,
+ IMG_UINT32 ui32PRFenceSyncOffset,
+ IMG_UINT32 ui32PRFenceValue,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 *pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 ui32TACmdSize,
+ IMG_PBYTE pui8TADMCmd,
+ IMG_UINT32 ui323DPRCmdSize,
+ IMG_PBYTE pui83DPRDMCmd,
+ IMG_UINT32 ui323DCmdSize,
+ IMG_PBYTE pui83DDMCmd,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_BOOL bLastTAInScene,
+ IMG_BOOL bKickTA,
+ IMG_BOOL bKickPR,
+ IMG_BOOL bKick3D,
+ IMG_BOOL bAbort,
+ IMG_UINT32 ui32PDumpFlags,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ IMG_BOOL bCommitRefCountsTA,
+ IMG_BOOL bCommitRefCounts3D,
+ IMG_BOOL *pbCommittedRefCountsTA,
+ IMG_BOOL *pbCommittedRefCounts3D,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus)
+{
+
+ IMG_UINT32 ui32TACmdCount = 0;
+ IMG_UINT32 ui323DCmdCount = 0;
+ IMG_UINT32 ui32TACmdOffset = 0;
+ IMG_UINT32 ui323DCmdOffset = 0;
+ RGXFWIF_UFO sPRUFO;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ IMG_INT32 i32UpdateFenceFD = -1;
+ IMG_UINT32 ui32JobId;
+
+ IMG_UINT32 ui32ClientPRUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32ClientPRUpdateValue = NULL;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress;
+ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress;
+ PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA;
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D;
+ IMG_UINT32 ui32TACommandOffset = 0;
+ IMG_UINT32 ui323DCommandOffset = 0;
+ IMG_UINT32 ui32TACmdHeaderOffset = 0;
+ IMG_UINT32 ui323DCmdHeaderOffset = 0;
+ IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+ IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+ IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ /* Android fd sync update info */
+ struct pvr_sync_append_data *psFDData = NULL;
+ if (i32UpdateTimelineFD >= 0 && !pi32UpdateFenceFD)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else
+ if (i32UpdateTimelineFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing native sync timeline (%d) in non native sync enabled driver",
+ __func__, i32UpdateTimelineFD));
+ }
+ if (i32CheckFenceFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing native check sync (%d) in non native sync enabled driver",
+ __func__, i32CheckFenceFD));
+ }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ sWorkloadKickDataTA.ui64ReturnDataIndex = 0;
+ sWorkloadKickData3D.ui64ReturnDataIndex = 0;
+#endif
+
+ ui32JobId = OSAtomicIncrement(&psRenderContext->hJobId);
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[31] = '\0';
+ *pbCommittedRefCountsTA = IMG_FALSE;
+ *pbCommittedRefCounts3D = IMG_FALSE;
+
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+ ui32ClientTAFenceCount,
+ apsClientTAFenceSyncPrimBlock,
+ paui32ClientTAFenceSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+ ui32ClientTAUpdateCount,
+ apsClientTAUpdateSyncPrimBlock,
+ paui32ClientTAUpdateSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+ ui32Client3DFenceCount,
+ apsClient3DFenceSyncPrimBlock,
+ paui32Client3DFenceSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+ ui32Client3DUpdateCount,
+ apsClient3DUpdateSyncPrimBlock,
+ paui32Client3DUpdateSyncOffset);
+ if(eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+
+ eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock,
+ ui32PRFenceSyncOffset,
+ &uiPRFenceUFOAddress);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto err_pr_fence_address;
+ }
+
+
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerTASyncPrims;i++)
+ {
+ if (!(paui32ServerTASyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on TA) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ for (i=0;i<ui32Server3DSyncPrims;i++)
+ {
+ if (!(paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on 3D) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Sanity check we have a PR kick if there are client or server fences
+ */
+ if (!bKickPR && ((ui32Client3DFenceCount != 0) || (ui32Server3DSyncPrims != 0)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence (client or server) passed without a PR kick", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32SyncPMRCount)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ PVRSRV_DEVICE_NODE *psDeviceNode = psRenderContext->psDeviceNode;
+ IMG_UINT32 ui32ClientIntUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiClientIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32ClientIntUpdateValue = NULL;
+ int err;
+
+ if (!bKickTA)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a TA",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!bKickPR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a PR",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (bKick3D)
+ {
+ ui32ClientIntUpdateCount = ui32Client3DUpdateCount;
+ pauiClientIntUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+ paui32ClientIntUpdateValue = paui32Client3DUpdateValue;
+ }
+ else
+ {
+ ui32ClientIntUpdateCount = ui32ClientPRUpdateCount;
+ pauiClientIntUpdateUFOAddress = pauiClientPRUpdateUFOAddress;
+ paui32ClientIntUpdateValue = paui32ClientPRUpdateValue;
+ }
+
+ err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ ui32ClientTAFenceCount,
+ pauiClientTAFenceUFOAddress,
+ paui32ClientTAFenceValue,
+ ui32ClientIntUpdateCount,
+ pauiClientIntUpdateUFOAddress,
+ paui32ClientIntUpdateValue,
+ &psAppendData);
+ if (err)
+ {
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_sync_append;
+ }
+
+ pvr_buffer_sync_append_checks_get(psAppendData,
+ &ui32ClientTAFenceCount,
+ &pauiClientTAFenceUFOAddress,
+ &paui32ClientTAFenceValue);
+
+ if (bKick3D)
+ {
+ pvr_buffer_sync_append_updates_get(psAppendData,
+ &ui32Client3DUpdateCount,
+ &pauiClient3DUpdateUFOAddress,
+ &paui32Client3DUpdateValue);
+ }
+ else
+ {
+ pvr_buffer_sync_append_updates_get(psAppendData,
+ &ui32ClientPRUpdateCount,
+ &pauiClientPRUpdateUFOAddress,
+ &paui32ClientPRUpdateValue);
+ }
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __FUNCTION__, ui32SyncPMRCount));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ /*
+ * The hardware requires a PR to be submitted if there is a TA (otherwise
+ * it can wedge if we run out of PB space with no PR to run)
+ *
+ * If we only have a TA, attach native checks to the TA and updates to the PR
+ * If we have a TA and 3D, attach checks to TA, updates to 3D
+ * If we only have a 3D, attach checks and updates to the 3D
+ *
+ * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+ * addition to the update fence FD (if supplied)
+ *
+ * Currently, the client driver never kicks only the 3D, so we only support
+ * that for the time being.
+ */
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+ IMG_UINT32 ui32ClientIntUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiClientIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32ClientIntUpdateValue = NULL;
+
+ if (!bKickTA)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs only supported for kicks including a TA",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+ if (!bKickPR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs require a PR for all kicks",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+ /* If we have a 3D, attach updates to that. Otherwise, we attach it to a PR */
+ if (bKick3D)
+ {
+ ui32ClientIntUpdateCount = ui32Client3DUpdateCount;
+ pauiClientIntUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+ paui32ClientIntUpdateValue = paui32Client3DUpdateValue;
+ }
+ else
+ {
+ ui32ClientIntUpdateCount = ui32ClientPRUpdateCount;
+ pauiClientIntUpdateUFOAddress = pauiClientPRUpdateUFOAddress;
+ paui32ClientIntUpdateValue = paui32ClientPRUpdateValue;
+ }
+
+ eError =
+ pvr_sync_append_fences(szFenceName,
+ i32CheckFenceFD,
+ i32UpdateTimelineFD,
+ ui32ClientIntUpdateCount,
+ pauiClientIntUpdateUFOAddress,
+ paui32ClientIntUpdateValue,
+ ui32ClientTAFenceCount,
+ pauiClientTAFenceUFOAddress,
+ paui32ClientTAFenceValue,
+ &psFDData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fdsync;
+ }
+ /* If we have a 3D, attach updates to that. Otherwise, we attach it to a PR */
+ if (bKick3D)
+ {
+ pvr_sync_get_updates(psFDData, &ui32Client3DUpdateCount,
+ &pauiClient3DUpdateUFOAddress, &paui32Client3DUpdateValue);
+ }
+ else
+ {
+ pvr_sync_get_updates(psFDData, &ui32ClientPRUpdateCount,
+ &pauiClientPRUpdateUFOAddress, &paui32ClientPRUpdateValue);
+ }
+ pvr_sync_get_checks(psFDData, &ui32ClientTAFenceCount,
+ &pauiClientTAFenceUFOAddress, &paui32ClientTAFenceValue);
+ if (ui32ClientPRUpdateCount)
+ {
+ PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+ PVR_ASSERT(paui32ClientPRUpdateValue);
+ }
+ if (ui32Client3DUpdateCount)
+ {
+ PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+ PVR_ASSERT(paui32Client3DUpdateValue);
+ }
+ }
+#endif /* SUPPORT_NATIVE_FENCE_SYNC */
+
+ /* Init and acquire the TA command if required */
+ if(bKickTA)
+ {
+ RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &(psRenderContext->sWorkEstData),
+ &(psRenderContext->sWorkEstData.sWorkloadMatchingDataTA),
+ ui32RenderTargetSize,
+ ui32NumberOfDrawCalls,
+ ui32NumberOfIndices,
+ ui32NumberOfMRTs,
+ ui64DeadlineInus,
+ &sWorkloadKickDataTA);
+#endif
+
+ /* Init the TA command helper */
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+ ui32ClientTAFenceCount,
+ pauiClientTAFenceUFOAddress,
+ paui32ClientTAFenceValue,
+ ui32ClientTAUpdateCount,
+ pauiClientTAUpdateUFOAddress,
+ paui32ClientTAUpdateValue,
+ ui32ServerTASyncPrims,
+ paui32ServerTASyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerTASyncs,
+ ui32TACmdSize,
+ pui8TADMCmd,
+ & pPreAddr,
+ (bKick3D ? NULL : & pPostAddr),
+ (bKick3D ? NULL : & pRMWUFOAddr),
+ RGXFWIF_CCB_CMD_TYPE_TA,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickDataTA,
+#else
+ NULL,
+#endif
+ "TA",
+ asTACmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tacmdinit;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* The following is used to determine the offset of the command header
+ * containing the workload estimation data so that can be accessed when
+ * the KCCB is read.
+ */
+ ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asTACmdHelperData);
+#endif
+
+ eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asTACmdHelperData),
+ asTACmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_taacquirecmd;
+ }
+ else
+ {
+ ui32TACmdCount++;
+ }
+ }
+
+ /* Only kick the 3D if required */
+ if (bKickPR)
+ {
+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+ /*
+ The command helper doesn't know about the PR fence so create
+ the command with all the fences against it and later create
+ the PR command itself which _must_ come after the PR fence.
+ */
+ sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+ sPRUFO.ui32Value = ui32PRFenceValue;
+
+ /* Init the PR fence command helper */
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ ui32Client3DFenceCount,
+ pauiClient3DFenceUFOAddress,
+ paui32Client3DFenceValue,
+ 0,
+ NULL,
+ NULL,
+ (bKick3D ? ui32Server3DSyncPrims : 0),
+ paui32Server3DSyncFlags,
+ PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK,
+ pasServer3DSyncs,
+ sizeof(sPRUFO),
+ (IMG_UINT8*) &sPRUFO,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "3D-PR-Fence",
+ &as3DCmdHelperData[ui323DCmdCount++]);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_prfencecmdinit;
+ }
+
+ /* Init the 3D PR command helper */
+ /*
+ See note above PVRFDSyncQueryFencesKM as to why updates for android
+ syncs are passed in with the PR
+ */
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ 0,
+ NULL,
+ NULL,
+ ui32ClientPRUpdateCount,
+ pauiClientPRUpdateUFOAddress,
+ paui32ClientPRUpdateValue,
+ 0,
+ NULL,
+ SYNC_FLAG_MASK_ALL,
+ NULL,
+ ui323DPRCmdSize,
+ pui83DPRDMCmd,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_3D_PR,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ "3D-PR",
+ &as3DCmdHelperData[ui323DCmdCount++]);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_prcmdinit;
+ }
+ }
+
+ if (bKick3D || bAbort)
+ {
+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &(psRenderContext->sWorkEstData),
+ &(psRenderContext->sWorkEstData.sWorkloadMatchingData3D),
+ ui32RenderTargetSize,
+ ui32NumberOfDrawCalls,
+ ui32NumberOfIndices,
+ ui32NumberOfMRTs,
+ ui64DeadlineInus,
+ &sWorkloadKickData3D);
+#endif
+ /* Init the 3D command helper */
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ 0,
+ NULL,
+ NULL,
+ ui32Client3DUpdateCount,
+ pauiClient3DUpdateUFOAddress,
+ paui32Client3DUpdateValue,
+ ui32Server3DSyncPrims,
+ paui32Server3DSyncFlags,
+ PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE,
+ pasServer3DSyncs,
+ ui323DCmdSize,
+ pui83DDMCmd,
+ (bKickTA ? NULL : & pPreAddr),
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_3D,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickData3D,
+#else
+ NULL,
+#endif
+ "3D",
+ &as3DCmdHelperData[ui323DCmdCount++]);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdinit;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* The following are used to determine the offset of the command header
+ * containing the workload estimation data so that can be accessed when
+ * the KCCB is read.
+ */
+ ui323DCmdHeaderOffset =
+ RGXCmdHelperGetDMCommandHeaderOffset(&as3DCmdHelperData[ui323DCmdCount - 1]);
+ ui323DFullRenderCommandOffset =
+ RGXCmdHelperGetCommandOffset(as3DCmdHelperData,
+ ui323DCmdCount - 1);
+#endif
+ }
+
+ /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+ if (ui323DCmdCount > IMG_ARR_NUM_ELEMS(as3DCmdHelperData))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_3dcmdinit;
+ }
+
+ if (ui323DCmdCount)
+ {
+ PVR_ASSERT(bKickPR || bKick3D);
+
+ /* Acquire space for all the 3D command(s) */
+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+ as3DCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling
+ * of a new TA command with the same Write offset in Kernel CCB.
+ */
+ goto fail_3dacquirecmd;
+ }
+ }
+
+ /*
+ We should acquire the space in the kernel CCB here because, after this
+ point, we release the commands, which take operations on server syncs
+ that cannot be undone.
+ */
+
+ /*
+ Everything is ready to go now, release the commands
+ */
+ if (ui32TACmdCount)
+ {
+ ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+ asTACmdHelperData,
+ "TA",
+ FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui32TACmdOffsetWrapCheck =
+ RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB
+ * and therefore would start at an offset of 0 rather than the current
+ * command offset.
+ */
+ if(ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ {
+ ui32TACommandOffset = ui32TACmdOffset;
+ }
+ else
+ {
+ ui32TACommandOffset = 0;
+ }
+#endif
+ }
+
+ if (ui323DCmdCount)
+ {
+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+ as3DCmdHelperData,
+ "3D",
+ FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ if(ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+ {
+ ui323DCommandOffset = ui323DCmdOffset;
+ }
+ else
+ {
+ ui323DCommandOffset = 0;
+ }
+#endif
+ }
+
+ if (ui32TACmdCount)
+ {
+ RGXFWIF_KCCB_CMD sTAKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel TA CCB command. */
+ sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sTAKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* Add the Workload data into the KCCB kick */
+ sTAKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Store the offset to the CCCB command header so that it can be
+ * referenced when the KCCB command reaches the FW
+ */
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset =
+ ui32TACommandOffset + ui32TACmdHeaderOffset;
+#endif
+
+ if(bCommitRefCountsTA)
+ {
+ AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTAKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+ &sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+ RGXFWIF_DM_TA,
+ bKickTA,
+ psRTDataCleanup,
+ psZBuffer,
+ psSBuffer);
+ *pbCommittedRefCountsTA = IMG_TRUE;
+ }
+ else
+ {
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ }
+
+ HTBLOGK(HTB_SF_MAIN_KICK_TA,
+ sTAKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32TACmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psRenderContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_TA3D);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_TA,
+ &sTAKCCBCmd,
+ sizeof(sTAKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TA3D);
+#endif
+ }
+
+ if (ui323DCmdCount)
+ {
+ RGXFWIF_KCCB_CMD s3DKCCBCmd;
+
+ /* Construct the kernel 3D CCB command. */
+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ /* Add the Workload data into the KCCB kick */
+ s3DKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Store the offset to the CCCB command header so that it can be
+ * referenced when the KCCB command reaches the FW
+ */
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#endif
+
+ if(bCommitRefCounts3D)
+ {
+ AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+ &s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+ RGXFWIF_DM_3D,
+ bKick3D,
+ psRTDataCleanup,
+ psZBuffer,
+ psSBuffer);
+ *pbCommittedRefCounts3D = IMG_TRUE;
+ }
+ else
+ {
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ }
+
+
+ HTBLOGK(HTB_SF_MAIN_KICK_3D,
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui323DCmdOffset);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_3D,
+ &s3DKCCBCmd,
+ sizeof(s3DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ }
+
+ /*
+ * Now check eError (which may have returned an error from our earlier calls
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+ * so we check it now...
+ */
+ if (eError != PVRSRV_OK )
+ {
+ goto fail_3dacquirecmd;
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32UpdateTimelineFD >= 0)
+ {
+ /* If we get here, this should never fail. Hitting that likely implies
+ * a code error above */
+ i32UpdateFenceFD = pvr_sync_get_update_fd(psFDData);
+ if (i32UpdateFenceFD < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get install update sync fd",
+ __FUNCTION__));
+ /* If we fail here, we cannot roll back the syncs, as the HW already
+ * has references to the resources they may be protecting in the kick,
+ * so fall through. */
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_3dacquirecmd;
+ }
+ }
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_complete_fences(psFDData);
+#endif
+ pvr_sync_free_append_fences_data(psFDData);
+
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psAppendData)
+ {
+ pvr_buffer_sync_append_finish(psAppendData);
+ }
+#endif
+
+ *pi32UpdateFenceFD = i32UpdateFenceFD;
+
+ return PVRSRV_OK;
+
+fail_3dacquirecmd:
+fail_3dcmdinit:
+fail_prcmdinit:
+fail_prfencecmdinit:
+fail_taacquirecmd:
+fail_tacmdinit:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ pvr_sync_rollback_append_fences(psFDData);
+ pvr_sync_free_append_fences_data(psFDData);
+fail_fdsync:
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#endif
+err_pr_fence_address:
+err_populate_sync_addr_list:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
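+
+/*
+ * Command assembly in PVRSRVRGXKickTA3DKM() above follows a strict order: at
+ * most one TA command and up to three 3D commands (PR fence, 3D-PR, full 3D
+ * render) are initialised first, CCB space is then acquired for each DM, and
+ * only after that are the commands released and kicked via the kernel CCB.
+ * Releasing performs operations on server syncs that cannot be undone, which
+ * is why acquisition failures are handled before any release takes place.
+ */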
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+ psConnection,
+ psRenderContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_TA);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the TA part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_tacontext;
+ }
+ psRenderContext->sTAData.ui32Priority = ui32Priority;
+ }
+
+ if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+ psConnection,
+ psRenderContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_3D);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_3dcontext;
+ }
+ psRenderContext->s3DData.ui32Priority = ui32Priority;
+ }
+ return PVRSRV_OK;
+
+fail_3dcontext:
+fail_tacontext:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXGetLastRenderContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ RGX_SERVER_RC_TA_DATA *psRenderCtxTAData;
+ RGX_SERVER_RC_3D_DATA *psRenderCtx3DData;
+ RGX_SERVER_COMMON_CONTEXT *psCurrentServerTACommonCtx, *psCurrentServer3DCommonCtx;
+ RGXFWIF_CONTEXT_RESET_REASON eLastTAResetReason, eLast3DResetReason;
+ IMG_UINT32 ui32LastTAResetJobRef, ui32Last3DResetJobRef;
+
+ PVR_ASSERT(psRenderContext != NULL);
+ PVR_ASSERT(peLastResetReason != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ psRenderCtxTAData = &(psRenderContext->sTAData);
+ psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+ psRenderCtx3DData = &(psRenderContext->s3DData);
+ psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+
+ /* Get the last reset reasons from both the TA and 3D so they are reset... */
+ eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx, &ui32LastTAResetJobRef);
+ eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx, &ui32Last3DResetJobRef);
+
+ /* Combine the reset reason from TA and 3D into one... */
+ *peLastResetReason = (IMG_UINT32) eLast3DResetReason;
+ *pui32LastResetJobRef = ui32Last3DResetJobRef;
+ if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE ||
+ ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP ||
+ eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) &&
+ (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP ||
+ eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)))
+ {
+ *peLastResetReason = eLastTAResetReason;
+ *pui32LastResetJobRef = ui32LastTAResetJobRef;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXGetPartialRenderCountKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+ IMG_UINT32 *pui32NumPartialRenders)
+{
+ RGXFWIF_HWRTDATA *psHWRTData;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psHWRTDataMemDesc, (void **)&psHWRTData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXGetPartialRenderCountKM: Failed to map Firmware Render Target Data (%u)", eError));
+ return eError;
+ }
+
+ *pui32NumPartialRenders = psHWRTData->ui32NumPartialRenders;
+
+ DevmemReleaseCpuVirtAddr(psHWRTDataMemDesc);
+
+ return PVRSRV_OK;
+}
+
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ DumpStalledFWCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+ if(NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+ }
+ }
+
+ if(NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX TA and 3D Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX TA and 3D Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTA3D_H__)
+#define __RGXTA3D_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+typedef struct {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWHWRTDataMemDesc;
+ DEVMEM_MEMDESC *psRTACtlMemDesc;
+ DEVMEM_MEMDESC *psRTArrayMemDesc;
+ DEVMEM_MEMDESC *psRendersAccArrayMemDesc;
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+} RGX_RTDATA_CLEANUP_DATA;
+
+struct _RGX_FREELIST_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ /* Free list PMR */
+ PMR *psFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset;
+
+ /* Freelist config */
+ IMG_UINT32 ui32MaxFLPages;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32CurrentFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_UINT32 ui32FreelistID;
+ IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */
+ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */
+ IMG_BOOL bCheckFreelist; /* freelist check enabled */
+ IMG_UINT32 ui32RefCount; /* freelist reference counting */
+
+ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */
+ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */
+ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */
+
+ IMG_PID ownerPid; /* Pid of the owner of the list */
+
+ /* Memory Blocks */
+ DLLIST_NODE sMemoryBlockHead;
+ DLLIST_NODE sMemoryBlockInitHead;
+ DLLIST_NODE sNode;
+
+ /* FW data structures */
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ HASH_TABLE* psWorkloadHashTable;
+#endif
+};
+
+struct _RGX_PMR_NODE_ {
+ RGX_FREELIST *psFreeList;
+ PMR *psPMR;
+ PMR_PAGELIST *psPageList;
+ DLLIST_NODE sMemoryBlock;
+ IMG_UINT32 ui32NumPages;
+ IMG_BOOL bInternal;
+#if defined(PVR_RI_DEBUG)
+ RI_HANDLE hRIHandle;
+#endif
+};
+
+typedef struct {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psRenderTargetMemDesc;
+} RGX_RT_CLEANUP_DATA;
+
+typedef struct {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ DEVMEM_MEMDESC *psZSBufferMemDesc;
+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr;
+
+ DEVMEMINT_RESERVATION *psReservation;
+ PMR *psPMR;
+ DEVMEMINT_MAPPING *psMapping;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+ IMG_UINT32 ui32ZSBufferID;
+ IMG_UINT32 ui32RefCount;
+ IMG_BOOL bOnDemand;
+
+ IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */
+ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */
+
+ IMG_PID owner;
+
+ DLLIST_NODE sNode;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 psRenderTarget,
+ IMG_DEV_VIRTADDR psPMMListDevVAddr,
+ IMG_DEV_VIRTADDR psVFPPageTableAddr,
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS],
+ RGX_RTDATA_CLEANUP_DATA **ppsCleanupData,
+ DEVMEM_MEMDESC **ppsRTACtlMemDesc,
+ IMG_UINT32 ui32PPPScreen,
+ IMG_UINT32 ui32PPPGridOffset,
+ IMG_UINT64 ui64PPPMultiSampleCtl,
+ IMG_UINT32 ui32TPCStride,
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr,
+ IMG_UINT32 ui32TPCSize,
+ IMG_UINT32 ui32TEScreen,
+ IMG_UINT32 ui32TEAA,
+ IMG_UINT32 ui32TEMTILE1,
+ IMG_UINT32 ui32TEMTILE2,
+ IMG_UINT32 ui32MTileStride,
+ IMG_UINT32 ui32ISPMergeLowerX,
+ IMG_UINT32 ui32ISPMergeLowerY,
+ IMG_UINT32 ui32ISPMergeUpperX,
+ IMG_UINT32 ui32ISPMergeUpperY,
+ IMG_UINT32 ui32ISPMergeScaleX,
+ IMG_UINT32 ui32ISPMergeScaleY,
+ IMG_UINT16 ui16MaxRTs,
+ DEVMEM_MEMDESC **psMemDesc,
+ IMG_UINT32 *puiHWRTData);
+
+/* Destroy HWRTData */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData);
+
+/* Create Render Target */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR psVHeapTableDevVAddr,
+ RGX_RT_CLEANUP_DATA **ppsCleanupData,
+ IMG_UINT32 *sRenderTargetFWDevVAddr);
+
+/* Destroy render target */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData);
+
+
+/*
+ RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ RGX_ZSBUFFER_DATA **ppsZSBuffer,
+ IMG_UINT32 *sRenderTargetFWDevVAddr);
+
+/*
+ RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+ RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls)
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
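+
+/*
+ * Illustrative lifecycle sketch (not part of the driver API, error handling
+ * omitted): the expected ordering of the ZS-Buffer calls declared above.
+ * psConnection, psDeviceNode, psReservation, psPMR and uiMapFlags are
+ * placeholders for objects obtained through the usual bridge/devmem paths.
+ *
+ *     RGX_ZSBUFFER_DATA *psZSBuffer;
+ *     RGX_POPULATION *psPopulation;
+ *     IMG_UINT32 ui32FWDevVAddr;
+ *
+ *     RGXCreateZSBufferKM(psConnection, psDeviceNode, psReservation, psPMR,
+ *                         uiMapFlags, &psZSBuffer, &ui32FWDevVAddr);
+ *     RGXPopulateZSBufferKM(psZSBuffer, &psPopulation);   (back with pages)
+ *     ...kicks referencing the ZS-Buffer...
+ *     RGXUnpopulateZSBufferKM(psPopulation);              (release the pages)
+ *     RGXDestroyZSBufferKM(psZSBuffer);
+ */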
+
+/*
+ RGXProcessRequestZSBufferBacking
+*/
+IMG_EXPORT
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID);
+
+/*
+ RGXProcessRequestZSBufferUnbacking
+*/
+IMG_EXPORT
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID);
+
+/*
+ RGXGrowFreeList
+*/
+IMG_INTERNAL
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages,
+ PDLLIST_NODE pListHeader);
+
+/* Create free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MaxFLPages,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ RGX_FREELIST *psGlobalFreeList,
+ IMG_BOOL bCheckFreelist,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ PMR *psFreeListPMR,
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset,
+ RGX_FREELIST **ppsFreeList);
+
+/* Destroy free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+ RGXProcessRequestGrow
+*/
+IMG_EXPORT
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID);
+
+
+/* Grow free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages);
+
+/* Shrink free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList);
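+
+/*
+ * Illustrative lifecycle sketch (not part of the driver API, error handling
+ * omitted): the expected ordering of the free list calls declared above.
+ * The connection, device node, page counts, device virtual address, PMR and
+ * offset are placeholders set up by the caller; NULL and IMG_FALSE are passed
+ * for the global free list and the check flag purely for illustration.
+ *
+ *     RGX_FREELIST *psFreeList;
+ *
+ *     RGXCreateFreeList(psConnection, psDeviceNode, ui32MaxFLPages,
+ *                       ui32InitFLPages, ui32GrowFLPages, NULL, IMG_FALSE,
+ *                       sFreeListDevVAddr, psFreeListPMR,
+ *                       uiFreeListPMROffset, &psFreeList);
+ *     RGXAddBlockToFreeListKM(psFreeList, ui32GrowFLPages);   (grow)
+ *     RGXRemoveBlockFromFreeListKM(psFreeList);               (shrink)
+ *     RGXDestroyFreeList(psFreeList);
+ */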
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistsCount,
+ IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXCreateRenderContextKM
+
+ @Description
+ Server-side implementation of RGXCreateRenderContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sMCUFenceAddr - MCU Fence device virtual address
+ @Input sVDMCallStackAddr - VDM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsRenderContext - server-side render context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyRenderContext
+
+ @Input psCleanupData - clean up data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickTA3DKM
+
+ @Description
+ Server-side implementation of RGXKickTA3D
+
+ @Input psRTDataCleanup - RT data associated with the kick (or NULL)
+ @Input psZBuffer - Z-buffer associated with the kick (or NULL)
+ @Input psSBuffer - S-buffer associated with the kick (or NULL)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientTAFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAFenceSyncOffset,
+ IMG_UINT32 *paui32ClientTAFenceValue,
+ IMG_UINT32 ui32ClientTAUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientTAUpdateValue,
+ IMG_UINT32 ui32ServerTASyncPrims,
+ IMG_UINT32 *paui32ServerTASyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerTASyncs,
+ IMG_UINT32 ui32Client3DFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DFenceSyncPrimBlock,
+ IMG_UINT32 *pauiClient3DFenceSyncOffset,
+ IMG_UINT32 *paui32Client3DFenceValue,
+ IMG_UINT32 ui32Client3DUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DUpdateSyncOffset,
+ IMG_UINT32 *paui32Client3DUpdateValue,
+ IMG_UINT32 ui32Server3DSyncPrims,
+ IMG_UINT32 *paui32Server3DSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServer3DSyncs,
+ SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock,
+ IMG_UINT32 ui32PRSyncOffset,
+ IMG_UINT32 ui32PRFenceValue,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 *pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 ui32TACmdSize,
+ IMG_PBYTE pui8TADMCmd,
+ IMG_UINT32 ui323DPRCmdSize,
+ IMG_PBYTE pui83DPRDMCmd,
+ IMG_UINT32 ui323DCmdSize,
+ IMG_PBYTE pui83DDMCmd,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_BOOL bLastTAInScene,
+ IMG_BOOL bKickTA,
+ IMG_BOOL bKickPR,
+ IMG_BOOL bKick3D,
+ IMG_BOOL bAbort,
+ IMG_UINT32 ui32PDumpFlags,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ IMG_BOOL bCommitRefCountsTA,
+ IMG_BOOL bCommitRefCounts3D,
+ IMG_BOOL *pbCommittedRefCountsTA,
+ IMG_BOOL *pbCommittedRefCounts3D,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+ IMG_UINT32 *pui32NumPartialRenders);
+
+/* Debug - check if render context is waiting on a fence */
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXTA3D_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxtdmtransfer.c
+@Title Device specific TDM transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_TQ_TDM_DATA;
+
+
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_UINT32 ui32Flags;
+ RGX_SERVER_TQ_TDM_DATA sTDMData;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hJobId;
+};
+
+static PVRSRV_ERROR _CreateTDMTransferContext(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEM_MEMDESC * psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO * psInfo,
+ RGX_SERVER_TQ_TDM_DATA * psTDMData)
+{
+ PVRSRV_ERROR eError;
+
+ eError = FWCommonContextAllocate(
+ psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_TDM,
+ RGXFWIF_DM_TDM,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_TQ2D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psTDMData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ psTDMData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+ RGX_SERVER_TQ_TDM_DATA * psTDMData,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM * psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(
+ psDeviceNode,
+ psTDMData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_TDM,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has, so we can free its resources */
+ FWCommonContextFree(psTDMData->psServerCommonContext);
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Allocate the server side structure */
+ *ppsTransferContext = NULL;
+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+ if (psTransferContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psTransferContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTransferContext->psCleanupSync,
+ "transfer context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psTransferContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+ pabyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+ sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+ eError = _CreateTDMTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->sTDMData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tdmtransfercontext;
+ }
+
+ SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+ *ppsTransferContext = psTransferContext;
+
+ return PVRSRV_OK;
+
+fail_tdmtransfercontext:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+ OSFreeMem(psTransferContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ *ppsTransferContext = NULL;
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+ /* Remove the node from the list before calling destroy: if destroy succeeds
+ * it will invalidate the node, so it must be re-added if destroy fails.
+ */
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_remove_node(&(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+ eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroyTDM;
+ }
+
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ SyncPrimFree(psTransferContext->psCleanupSync);
+
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+ OSFreeMem(psTransferContext);
+
+ return PVRSRV_OK;
+
+ fail_destroyTDM:
+
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** papsServerSyncs,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 * pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 ui32FWCommandSize,
+ IMG_UINT8 * pui8FWCommand,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 * paui32SyncPMRFlags,
+ PMR ** ppsSyncPMRs)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+ PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 * paui32IntFenceValue = paui32ClientFenceValue;
+ IMG_UINT32 ui32IntClientFenceCount = ui32ClientFenceCount;
+ IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue;
+ IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_INT32 i32UpdateFenceFD = -1;
+ IMG_UINT32 ui32JobId;
+
+ IMG_UINT32 ui32CmdOffset = 0;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ struct pvr_sync_append_data *psFDFenceData = NULL;
+
+ if (i32UpdateTimelineFD >= 0 && !pi32UpdateFenceFD)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else
+ if (i32UpdateTimelineFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing native sync timeline (%d) in non native sync enabled driver",
+ __func__, i32UpdateTimelineFD));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (i32CheckFenceFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing native check sync (%d) in non native sync enabled driver",
+ __func__, i32CheckFenceFD));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif
+
+ ui32JobId = OSAtomicIncrement(&psTransferContext->hJobId);
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[31] = '\0';
+
+ if (ui32SyncPMRCount != 0)
+ {
+ if (!ppsSyncPMRs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ /* We can't allocate the required amount of stack space on all consumer architectures */
+ psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+ if (psCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_allochelper;
+ }
+
+
+ /*
+ Init the command helper commands for all the prepares
+ */
+ {
+ RGX_CLIENT_CCB *psClientCCB;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+ IMG_CHAR *pszCommandName;
+ RGXFWIF_CCB_CMD_TYPE eType;
+
+ psServerCommonCtx = psTransferContext->sTDMData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-TDM";
+ eType = (ui32FWCommandSize == 0) ? RGXFWIF_CCB_CMD_TYPE_NULL : RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (ui32SyncPMRCount)
+ {
+ int err;
+
+ err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ &psAppendData);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to append buffer syncs (errno=%d)", __FUNCTION__, err));
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_sync_append;
+ }
+
+ pvr_buffer_sync_append_checks_get(psAppendData,
+ &ui32IntClientFenceCount,
+ &pauiIntFenceUFOAddress,
+ &paui32IntFenceValue);
+
+ pvr_buffer_sync_append_updates_get(psAppendData,
+ &ui32IntClientUpdateCount,
+ &pauiIntUpdateUFOAddress,
+ &paui32IntUpdateValue);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+ eError =
+ pvr_sync_append_fences(szFenceName,
+ i32CheckFenceFD,
+ i32UpdateTimelineFD,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ &psFDFenceData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_syncinit;
+ }
+ pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+ &pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+ pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+ &pauiIntFenceUFOAddress, &paui32IntFenceValue);
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Create the command helper data for this command
+ */
+ eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncCount,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ papsServerSyncs,
+ ui32FWCommandSize,
+ pui8FWCommand,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ eType,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ pszCommandName,
+ psCmdHelper);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+ }
+
+ /*
+ Acquire space for all the commands in one go
+ */
+
+ eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdacquire;
+ }
+
+
+ /*
+ We should acquire the kernel CCB(s) space here as the schedule could fail
+ and we would have to roll back all the syncs
+ */
+
+ /*
+ Only do the command helper release (which takes the server sync
+ operations) if the acquire succeeded
+ */
+ ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1,
+ psCmdHelper,
+ "TQ_TDM",
+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+ /*
+ Even if we failed to acquire the client CCB space we might still need
+ to kick the HW to process a padding packet to release space for us next
+ time round
+ */
+ {
+ RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+
+ /* Construct the kernel 3D CCB command. */
+ sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+ /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+ /* ui323DCmdOffset); */
+ RGX_HWPERF_HOST_ENQ(psTransferContext, OSGetCurrentClientProcessIDKM(),
+ FWCommonContextGetFWAddress(psTransferContext->
+ sTDMData.psServerCommonContext).ui32Addr,
+ ui32ExtJobRef, ui32JobId, RGX_HWPERF_KICK_TYPE_TQTDM);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_TDM,
+ & sTDMKCCBCmd,
+ sizeof(sTDMKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ FWCommonContextGetFWAddress(psTransferContext->
+ sTDMData.psServerCommonContext).ui32Addr,
+ ui32JobId, RGX_HWPERF_KICK_TYPE_TQTDM);
+#endif
+ }
+
+ /*
+ * Now check eError (which may hold an error returned by our earlier call
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+ * first, so we check it now...
+ */
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_2dcmdacquire;
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32UpdateTimelineFD >= 0)
+ {
+ /* If we get here, this should never fail. Hitting that likely implies
+ * a code error above */
+ i32UpdateFenceFD = pvr_sync_get_update_fd(psFDFenceData);
+ if (i32UpdateFenceFD < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get install update sync fd",
+ __FUNCTION__));
+ /* If we fail here, we cannot rollback the syncs as the hw already
+ * has references to resources they may be protecting in the kick
+ * so fallthrough */
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_free_append_data;
+ }
+ }
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+ /*
+ Free the merged sync memory if required
+ */
+ pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psAppendData)
+ {
+ pvr_buffer_sync_append_finish(psAppendData);
+ }
+#endif
+
+ * pi32UpdateFenceFD = i32UpdateFenceFD;
+
+ OSFreeMem(psCmdHelper);
+
+ return PVRSRV_OK;
+
+/*
+ No resources are created in this function so there is nothing to free
+ unless we had to merge syncs.
+ If we fail after the client CCB acquire there is still nothing to do
+ as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+fail_initcmd:
+
+/* fail_pdumpcheck: */
+/* fail_cmdtype: */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+fail_syncinit:
+ /* Cleanup is located here because the loop above can fail after its first
+ * iteration (via the goto tags above), at which point the psFDFenceData
+ * memory has already been allocated.
+ */
+ pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+ pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#endif
+fail_populate_sync_addr_list:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ OSFreeMem(psCmdHelper);
+fail_allochelper:
+ return eError;
+
+
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_TDM,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXTDMNotifyWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_TDM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+ if (CheckStalledClientCommonContext(
+ psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+ RGX_KICK_TYPE_DM_TDM_2D) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+ return ui32ContextBitMask;
+}
+
+
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxtdmtransfer.h
+@Title RGX Transfer queue 2 Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Transfer queue Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTDMTRANSFER_H__)
+#define __RGXTDMTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** papsServerSyncs,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 * pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 ui32FWCommandSize,
+ IMG_UINT8 * pui8FWCommand,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 * pui32SyncPMRFlags,
+ PMR ** ppsSyncPMRs);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+#endif /* __RGXTDMTRANSFER_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxtimecorr.c
+@Title Device specific time correlation and calibration routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific time correlation and calibration routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+#include "htbserver.h"
+#include "pvrsrv_apphint.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ * and it's closed before a power-off and before a DVFS transition
+ * (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs...,
+ * where each arrow is a calibration period)
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ * each period together with the (possibly calibrated) current GPU frequency
+ *
+ * - If the frequency has not changed since the last power-off/on sequence or
+ * before/after a DVFS transition (-> the transition didn't really happen)
+ * then multiple consecutive periods are merged (the longer the accumulated
+ * period, the better the accuracy of the computed clock speed)
+ *
+ * - Correlation and calibration are also done more or less periodically
+ * (using a best effort approach)
+ *
+ *****************************************************************************/
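+
+/*
+ * Illustrative sketch (not part of the driver): how a correlation sample can
+ * be used to turn a firmware CR timestamp into an OS timestamp. For
+ * simplicity this assumes the CR timer advances at the correlated core clock
+ * frequency; the driver itself hands the firmware a precomputed, scaled
+ * conversion factor (ui32CRDeltaToOSDeltaKNs) instead of dividing like this.
+ * ui32CurrIdx and ui64CRTimestampNow are placeholders.
+ *
+ *     RGXFWIF_TIME_CORR *psCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+ *     IMG_UINT64 ui64CRDelta   = ui64CRTimestampNow - psCorr->ui64CRTimeStamp;
+ *     IMG_UINT64 ui64OSDeltaNs = (ui64CRDelta * 1000000000ULL) /
+ *                                psCorr->ui32CoreClockSpeed;
+ *     IMG_UINT64 ui64OSTimeNs  = psCorr->ui64OSTimeStamp + ui64OSDeltaNs;
+ */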
+
+static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/*
+ AppHint interfaces
+*/
+
+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ static const IMG_CHAR *apszClocks[] = {
+ "mono", "mono_raw", "sched"
+ };
+
+ if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ g_ui32ClockSource = ui32Value;
+
+ PVR_DPF((PVR_DBG_WARNING, "Time correlation clock set to \"%s\"",
+ apszClocks[g_ui32ClockSource]));
+
+ if (psDeviceNode)
+ {
+ /* update correlation data, and unfortunately we have to remove
+ * 'const' to do so */
+ RGXGPUFreqCalibrateCorrelatePeriodic((PVRSRV_DEVICE_NODE *) psDeviceNode);
+ }
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+ PVR_UNREFERENCED_PARAMETER(apszClocks);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ *pui32Value = g_ui32ClockSource;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return PVRSRV_OK;
+}
+
+void RGXGPUFreqCalibrationInitAppHintCallbacks(
+ const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
+ _SetClock, psDeviceNode, NULL);
+}
+
+/*
+ End of AppHint interface
+*/
+
+IMG_UINT64 RGXGPUFreqCalibrateClockns64(void)
+{
+ IMG_UINT64 ui64Clock;
+
+ switch (g_ui32ClockSource) {
+ case RGXTIMECORR_CLOCK_MONO:
+ return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
+ case RGXTIMECORR_CLOCK_MONO_RAW:
+ return OSClockMonotonicRawns64();
+ case RGXTIMECORR_CLOCK_SCHED:
+ return OSClockns64();
+ default:
+ PVR_ASSERT(IMG_FALSE);
+ return 0;
+ }
+}
+
+IMG_UINT64 RGXGPUFreqCalibrateClockus64(void)
+{
+ IMG_UINT32 rem;
+ return OSDivide64r64(RGXGPUFreqCalibrateClockns64(), 1000, &rem);
+}
+
+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_BOOL bLogToHTB)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ RGXFWIF_TIME_CORR *psTimeCorr;
+ IMG_UINT32 ui32NewSeqCount;
+ IMG_UINT32 ui32CoreClockSpeed;
+ IMG_UINT32 ui32Remainder;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ IMG_UINT64 ui64OSMonoTime = 0;
+#endif
+
+ ui32CoreClockSpeed = psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId];
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ {
+ PVRSRV_ERROR eError;
+ eError = OSClockMonotonicns64(&ui64OSMonoTime);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+#endif
+
+ ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+ psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+
+ psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+ psTimeCorr->ui64OSTimeStamp = RGXGPUFreqCalibrateClockns64();
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psTimeCorr->ui64OSMonoTimeStamp = ui64OSMonoTime;
+#endif
+ psTimeCorr->ui32CoreClockSpeed = ui32CoreClockSpeed;
+ psTimeCorr->ui32CRDeltaToOSDeltaKNs =
+ RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(ui32CoreClockSpeed, ui32Remainder);
+
+ /* Make sure the values are written to memory before updating the index of the current entry */
+ OSWriteMemoryBarrier();
+
+ /* Update the index of the current entry in the timer correlation array */
+ psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RGXMakeTimeCorrData: Correlated OS timestamp %llu (ns) with CR timestamp %llu, GPU clock speed %uHz",
+ psTimeCorr->ui64OSTimeStamp, psTimeCorr->ui64CRTimeStamp, psTimeCorr->ui32CoreClockSpeed));
+
+ HTBSyncScale(
+ bLogToHTB,
+ psTimeCorr->ui64OSTimeStamp,
+ psTimeCorr->ui64CRTimeStamp,
+ psTimeCorr->ui32CoreClockSpeed);
+}
+
+
+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ IMG_UINT32 ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+ IMG_UINT32 ui32Index = RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed);
+
+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+ IMG_UINT64 ui64OSTimestamp = RGXGPUFreqCalibrateClockus64();
+
+ psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+ psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+ /* Set the time needed to (re)calibrate the GPU frequency */
+ if ((psGpuDVFSTable->aui32DVFSClock[ui32Index] == 0) || /* We never met this frequency */
+ (psGpuDVFSTable->aui32DVFSClock[ui32Index] == ui32CoreClockSpeed)) /* We weren't able to calibrate this frequency previously */
+ {
+ psGpuDVFSTable->aui32DVFSClock[ui32Index] = ui32CoreClockSpeed;
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXGPUFreqCalibrationStart: using uncalibrated GPU frequency %u", ui32CoreClockSpeed));
+ }
+ else if (psGpuDVFSTable->ui32CalibrationPeriod == RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US)
+ {
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+ }
+ else
+ {
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+ }
+
+ /* Update the index to the DVFS table */
+ psGpuDVFSTable->ui32CurrentDVFSId = ui32Index;
+}
+
+
+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+ IMG_UINT64 ui64OSTimestamp = RGXGPUFreqCalibrateClockus64();
+
+ if (!psGpuDVFSTable->bAccumulatePeriod)
+ {
+ psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+ psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+ }
+
+ psGpuDVFSTable->ui64CalibrationCRTimediff +=
+ ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+ psGpuDVFSTable->ui64CalibrationOSTimediff +=
+ ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+}
+
+
+static IMG_UINT32 _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32CalibratedClockSpeed;
+ IMG_UINT32 ui32Remainder;
+
+ ui32CalibratedClockSpeed =
+ RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff,
+ psGpuDVFSTable->ui64CalibrationOSTimediff,
+ ui32Remainder);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "GPU frequency calibration: %u -> %u done over %llu us",
+ psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId],
+ ui32CalibratedClockSpeed,
+ psGpuDVFSTable->ui64CalibrationOSTimediff));
+
+ psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId] = ui32CalibratedClockSpeed;
+
+ /* Reset time deltas to avoid recalibrating the same frequency over and over again */
+ psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+ psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+
+ return ui32CalibratedClockSpeed;
+#else
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ return psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId];
+#endif
+}
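+
+/*
+ * Worked example (illustrative only): if the CR timer advanced by
+ * 100,000,000 ticks while 250,000 us of OS time elapsed, the calibrated
+ * clock speed is 100e6 ticks / 0.25 s = 400 MHz. Assuming the CR timer ticks
+ * at the core clock frequency (the macro may apply additional scaling and
+ * rounding), RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ() above is essentially:
+ *
+ *     ui32CalibratedClockSpeed =
+ *         (IMG_UINT32)((psGpuDVFSTable->ui64CalibrationCRTimediff * 1000000ULL) /
+ *                      psGpuDVFSTable->ui64CalibrationOSTimediff);
+ */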
+
+
+/*
+ RGXGPUFreqCalibratePrePowerOff
+*/
+void RGXGPUFreqCalibratePrePowerOff(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+
+ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+ if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+ {
+ _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+ }
+}
+
+
+/*
+ RGXGPUFreqCalibratePostPowerOn
+*/
+void RGXGPUFreqCalibratePostPowerOn(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ IMG_UINT32 ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ /* If the frequency hasn't changed then accumulate the time diffs to get a better result */
+ psGpuDVFSTable->bAccumulatePeriod =
+ (RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed) == psGpuDVFSTable->ui32CurrentDVFSId);
+
+ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+ /* Update the timer correlation data */
+ /* Don't log timing data to the HTB log post power transition.
+ * Otherwise this will be logged before the HTB partition marker, breaking
+ * the log sync grammar. This data will be automatically repeated when the
+ * partition marker is written
+ */
+ _RGXMakeTimeCorrData(psDeviceNode, IMG_FALSE);
+}
+
+
+/*
+ RGXGPUFreqCalibratePreClockSpeedChange
+*/
+void RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+
+ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+ /* Wait until RGXPostClockSpeedChange() to do anything as the GPU frequency may be left
+ * unchanged (in that case we delay calibration/correlation to get a better result later) */
+}
+
+
+/*
+ RGXGPUFreqCalibratePostClockSpeedChange
+*/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ IMG_UINT32 ui32ReturnedClockSpeed = ui32NewClockSpeed;
+
+ if (RGX_GPU_DVFS_GET_INDEX(ui32NewClockSpeed) != psGpuDVFSTable->ui32CurrentDVFSId)
+ {
+ /* Only calibrate if the last period was long enough */
+ if (psGpuDVFSTable->ui64CalibrationOSTimediff >= RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US)
+ {
+ ui32ReturnedClockSpeed = _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+ }
+
+ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+ /* Update the timer correlation data */
+ _RGXMakeTimeCorrData(psDeviceNode, IMG_TRUE);
+ psGpuDVFSTable->bAccumulatePeriod = IMG_FALSE;
+ }
+ else
+ {
+ psGpuDVFSTable->bAccumulatePeriod = IMG_TRUE;
+ }
+
+ return ui32ReturnedClockSpeed;
+}
+
+
+/*
+ RGXGPUFreqCalibrateCorrelatePeriodic
+*/
+void RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ IMG_UINT64 ui64TimeNow = RGXGPUFreqCalibrateClockus64();
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ /* Check if it's the right time to recalibrate the GPU clock frequency */
+ if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return;
+
+ /* Try to acquire the powerlock, if not possible then don't wait */
+ if (OSLockIsLocked(psDeviceNode->hPowerLock)) return; /* Better to not wait here if possible */
+ /* There is still a chance that the power lock gets taken between the check
+ above and the acquire below; that is acceptable, even if not desirable
+ (TODO: use OSTryLockAcquire, currently implemented under Linux only) */
+ if (PVRSRVPowerLock(psDeviceNode) != PVRSRV_OK) return;
+
+ /* If the GPU is off then we can't do anything */
+ PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+
+ /* All checks passed, we can calibrate and correlate */
+ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+ _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+ _RGXMakeTimeCorrData(psDeviceNode, IMG_TRUE);
+
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+ RGXGPUFreqCalibrateGetClockSource
+*/
+RGXTIMECORR_CLOCK_TYPE RGXGPUFreqCalibrateGetClockSource(void)
+{
+ return g_ui32ClockSource;
+}
+
+/*
+ RGXGPUFreqCalibrateSetClockSource
+*/
+PVRSRV_ERROR RGXGPUFreqCalibrateSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXTIMECORR_CLOCK_TYPE eClockType)
+{
+ return _SetClock(psDeviceNode, NULL, eClockType);
+}
+
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxtimecorr.h
+@Title RGX time correlation and calibration header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX time correlation and calibration routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTIMECORR_H__)
+#define __RGXTIMECORR_H__
+
+#include "img_types.h"
+#include "device.h"
+
+typedef enum {
+ RGXTIMECORR_CLOCK_MONO,
+ RGXTIMECORR_CLOCK_MONO_RAW,
+ RGXTIMECORR_CLOCK_SCHED,
+
+ RGXTIMECORR_CLOCK_LAST
+} RGXTIMECORR_CLOCK_TYPE;
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibratePrePowerOff
+
+ @Description Manage GPU frequency and timer correlation data
+ before a power off.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePrePowerOff(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibratePostPowerOn
+
+ @Description Manage GPU frequency and timer correlation data
+ after a power on.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePostPowerOn(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibratePreClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+ before a DVFS transition.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibratePostClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+ after a DVFS transition.
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32NewClockSpeed : GPU clock speed after the DVFS transition
+
+ @Return IMG_UINT32 : Calibrated GPU clock speed after the DVFS transition
+
+******************************************************************************/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibrateCorrelatePeriodic
+
+ @Description Calibrate the GPU clock speed and correlate the timers
+ at regular intervals.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibrateClockns64
+
+ @Description Returns value of currently selected clock (in ns).
+
+ @Return clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXGPUFreqCalibrateClockns64(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibrateClockus64
+
+ @Description Returns value of currently selected clock (in us).
+
+ @Return clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXGPUFreqCalibrateClockus64(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibrateGetClockSource
+
+ @Description Returns currently selected clock source
+
+ @Return clock source type
+
+******************************************************************************/
+RGXTIMECORR_CLOCK_TYPE RGXGPUFreqCalibrateGetClockSource(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXGPUFreqCalibrateSetClockSource
+
+ @Description Sets clock source for correlation data.
+
+ @Input psDeviceNode : RGX Device Node
+ @Input eClockType : clock source type
+
+ @Return error code
+
+******************************************************************************/
+PVRSRV_ERROR RGXGPUFreqCalibrateSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXTIMECORR_CLOCK_TYPE eClockType);
+
+void RGXGPUFreqCalibrationInitAppHintCallbacks(
+ const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* __RGXTIMECORR_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Timer queries
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Timer queries
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimerquery.h"
+#include "rgxdevice.h"
+#include "rgxtimecorr.h"
+
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
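+	/* Arm start/end timestamp capture; these flags are picked up by subsequent kicks */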
+ psDevInfo->bSaveStart = IMG_TRUE;
+ psDevInfo->bSaveEnd = IMG_TRUE;
+
+ /* clear the stamps, in case there is no Kick */
+ psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL;
+ psDevInfo->pui64EndTimeById[ui32QueryId] = 0UL;
+
+	/* save the active query index */
+ psDevInfo->ui32ActiveQueryId = ui32QueryId;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* clear off the flags set by Begin(). Note that _START_TIME is
+ * probably already cleared by Kick()
+ */
+ psDevInfo->bSaveStart = IMG_FALSE;
+ psDevInfo->bSaveEnd = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId,
+ IMG_UINT64 * pui64StartTime,
+ IMG_UINT64 * pui64EndTime)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ IMG_UINT32 ui32Scheduled;
+ IMG_UINT32 ui32Completed;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
+ ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
+
+	/* If there was no kick since Begin() on this id we return zeros, as Begin()
+	 * cleared the stamps. If there was no Begin() the returned data is undefined,
+	 * but still safe from the services point of view.
+	 */
+ if (ui32Completed >= ui32Scheduled)
+ {
+ * pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId];
+ * pui64EndTime = psDevInfo->pui64EndTimeById[ui32QueryId];
+
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT64 * pui64Time)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ *pui64Time = RGXGPUFreqCalibrateClockns64();
+
+ return PVRSRV_OK;
+}
+
+
+
+/******************************************************************************
+ NOT BRIDGED/EXPORTED FUNCS
+******************************************************************************/
+/* Writes a timestamp command into the client CCB */
+void
+RGXWriteTimestampCommand(IMG_PBYTE * ppbyPtr,
+ RGXFWIF_CCB_CMD_TYPE eCmdType,
+ PRGXFWIF_TIMESTAMP_ADDR pAddr)
+{
+ RGXFWIF_CCB_CMD_HEADER * psHeader;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppbyPtr);
+
+ PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
+ || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
+
+ psHeader->eCmdType = eCmdType;
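+	/* Command payload is the timestamp FW address, with its size rounded up to the FW allocation alignment */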
+ psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1);
+
+ (*ppbyPtr) += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ (*(PRGXFWIF_TIMESTAMP_ADDR*)*ppbyPtr) = pAddr;
+
+ (*ppbyPtr) += psHeader->ui32CmdSize;
+}
+
+
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+ PRGXFWIF_UFO_ADDR * ppUpdate)
+{
+ if (ppPreAddr != NULL)
+ {
+ if (psDevInfo->bSaveStart)
+ {
+ /* drop the SaveStart on the first Kick */
+ psDevInfo->bSaveStart = IMG_FALSE;
+
+ RGXSetFirmwareAddress(ppPreAddr,
+ psDevInfo->psStartTimeMemDesc,
+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ ppPreAddr->ui32Addr = 0;
+ }
+ }
+
+ if (ppPostAddr != NULL && ppUpdate != NULL)
+ {
+ if (psDevInfo->bSaveEnd)
+ {
+ RGXSetFirmwareAddress(ppPostAddr,
+ psDevInfo->psEndTimeMemDesc,
+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+
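+			/* Count another command scheduled on this query; the completed count is advanced by the UFO update below */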
+ psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
+
+ RGXSetFirmwareAddress(ppUpdate,
+ psDevInfo->psCompletedMemDesc,
+ sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ ppUpdate->ui32Addr = 0;
+ ppPostAddr->ui32Addr = 0;
+ }
+ }
+}
+
+
+/******************************************************************************
+ End of file (rgxtimerquery.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Timer queries
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Timer queries functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_TIMERQUERIES_H_)
+#define _RGX_TIMERQUERIES_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+#include "connection_server.h"
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXBeginTimerQueryKM
+@Description Opens a new timer query.
+
+@Input ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXEndTimerQueryKM
+@Description Closes a timer query.
+
+ There is no ui32QueryId argument because overlapping open queries
+ are not allowed.
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode);
+
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXQueryTimerKM
+@Description Queries the state of the specified timer
+
+@Input ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Out pui64StartTime
+@Out pui64EndTime
+@Return PVRSRV_OK on success.
+ PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with
+ operations from the queried period
+ other error code otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId,
+ IMG_UINT64 * pui64StartTime,
+ IMG_UINT64 * pui64EndTime);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXCurrentTime
+@Description Returns the current state of the timer used in timer queries
+@Input psDeviceNode Device node.
+@Out pui64Time
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT64 * pui64Time);
+
+
+/******************************************************************************
+ NON BRIDGED/EXPORTED interface
+******************************************************************************/
+
+/* Writes a timestamp command into the client CCB */
+void
+RGXWriteTimestampCommand(IMG_PBYTE * ppui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE eCmdType,
+ PRGXFWIF_TIMESTAMP_ADDR pAddr);
+
+/* Fills in the pre/post timestamp FW addresses and the completion update address for a kick */
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+ PRGXFWIF_UFO_ADDR * ppUpdate);
+
+#endif /* _RGX_TIMERQUERIES_H_ */
+
+/******************************************************************************
+ End of file (rgxtimerquery.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
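+/* Per data master (3D, and 2D on cores with TLA) server-side state held within a transfer context */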
+typedef struct {
+ DEVMEM_MEMDESC *psFWContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_TQ_3D_DATA;
+
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_TQ_2D_DATA;
+
+struct _RGX_SERVER_TQ_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_UINT32 ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1)
+ RGX_SERVER_TQ_3D_DATA s3DData;
+ RGX_SERVER_TQ_2D_DATA s2DData;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ ATOMIC_T hJobId;
+ IMG_UINT32 ui32PDumpFlags;
+ /* per-prepare sync address lists */
+ SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT];
+ SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT];
+};
+
+/*
+ Static functions used by transfer context code
+*/
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_TQ_3D_DATA *ps3DData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_3DCTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwTQ3DContext",
+ &ps3DData->psFWContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextswitchstate;
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_3D,
+ RGXFWIF_DM_3D,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ ps3DData->psFWContextStateMemDesc,
+ RGX_TQ3D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps3DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+
+ PDUMPCOMMENT("Dump 3D context suspend state buffer");
+ DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+ ps3DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+ DevmemFwFree(psDevInfo, ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_TQ_2D_DATA *ps2DData)
+{
+ PVRSRV_ERROR eError;
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_2D,
+ RGXFWIF_DM_2D,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_TQ2D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps2DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ ps2DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps2DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_2D,
+ ui32PDumpFlags);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ FWCommonContextFree(ps2DData->psServerCommonContext);
+ ps2DData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps3DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_3D,
+ ui32PDumpFlags);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc);
+ FWCommonContextFree(ps3DData->psServerCommonContext);
+ ps3DData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXCreateTransferContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext)
+{
+ RGX_SERVER_TQ_CONTEXT *psTransferContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Allocate the server side structure */
+ *ppsTransferContext = NULL;
+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+ if (psTransferContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psTransferContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTransferContext->psCleanupSync,
+ "transfer context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psTransferContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+ pabyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+ sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+ eError = _Create3DTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->s3DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dtransfercontext;
+ }
+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
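+	/* The 2D (TLA) transfer context is only created on cores with the TLA feature */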
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK)
+ {
+ eError = _Create2DTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->s2DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_2dtransfercontext;
+ }
+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+ }
+
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+ return PVRSRV_OK;
+
+
+fail_2dtransfercontext:
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK)
+ {
+ _Destroy3DTransferContext(&psTransferContext->s3DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ psTransferContext->ui32PDumpFlags);
+ }
+
+fail_3dtransfercontext:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+ OSFreeMem(psTransferContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ *ppsTransferContext = NULL;
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+ IMG_UINT32 i;
+
+	/* Remove the node from the list before calling destroy, as a successful
+	 * destroy will invalidate the node. It must be re-added if destroy fails.
+	 */
+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+ dllist_remove_node(&(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+ if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroy2d;
+ }
+ /* We've freed the 2D context, don't try to free it again */
+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+ }
+
+ if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+ {
+ eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroy3d;
+ }
+ /* We've freed the 3D context, don't try to free it again */
+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+ }
+
+ /* free any resources within the per-prepare UFO address stores */
+ for(i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++)
+ {
+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]);
+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]);
+ }
+
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ SyncPrimFree(psTransferContext->psCleanupSync);
+
+ OSFreeMem(psTransferContext);
+
+ return PVRSRV_OK;
+
+fail_destroy3d:
+
+fail_destroy2d:
+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ * PVRSRVRGXSubmitTransferKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32PrepareCount,
+ IMG_UINT32 *paui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientFenceSyncOffset,
+ IMG_UINT32 **papaui32ClientFenceValue,
+ IMG_UINT32 *paui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientUpdateSyncOffset,
+ IMG_UINT32 **papaui32ClientUpdateValue,
+ IMG_UINT32 *paui32ServerSyncCount,
+ IMG_UINT32 **papaui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ***papapsServerSyncs,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 *pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 *paui32FWCommandSize,
+ IMG_UINT8 **papaui8FWCommand,
+ IMG_UINT32 *pui32TQPrepareFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+ RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+ IMG_UINT32 ui323DCmdCount = 0;
+ IMG_UINT32 ui322DCmdCount = 0;
+ IMG_UINT32 ui323DCmdOffset = 0;
+ IMG_UINT32 ui322DCmdOffset = 0;
+ IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ IMG_UINT32 *paui32IntFenceValue = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_INT32 i32UpdateFenceFD = -1;
+ IMG_UINT32 ui32JobId;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ struct pvr_sync_append_data *psFDFenceData = NULL;
+
+ if (i32UpdateTimelineFD >= 0 && !pi32UpdateFenceFD)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else
+ if (i32UpdateTimelineFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing native sync timeline (%d) in non native sync enabled driver",
+ __func__, i32UpdateTimelineFD));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (i32CheckFenceFD >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing native check sync (%d) in non native sync enabled driver",
+ __func__, i32CheckFenceFD));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif
+
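+	/* Allocate a job id for this submission, used for HWPerf/HTB annotation */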
+ ui32JobId = OSAtomicIncrement(&psTransferContext->hJobId);
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[31] = '\0';
+
+ if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32SyncPMRCount != 0)
+ {
+ if (!ppsSyncPMRs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ /* PMR sync is valid only when there is no batching */
+		if (ui32PrepareCount != 1)
+#endif
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+		/* Fence FDs are only valid in the 3D case with no batching */
+		if ((ui32PrepareCount != 1) && (!TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[0], 3D)))
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#else
+		/* We only support Fence FDs if built with SUPPORT_NATIVE_FENCE_SYNC */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+#endif
+ }
+
+ /* We can't allocate the required amount of stack space on all consumer architectures */
+ pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+ if (pas3DCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc3dhelper;
+ }
+ pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+ if (pas2DCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc2dhelper;
+ }
+
+ /*
+ Ensure we do the right thing for server syncs which cross call boundaries
+ */
+ for (i=0;i<ui32PrepareCount;i++)
+ {
+ IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START;
+ IMG_BOOL bHaveEndPrepare = IMG_FALSE;
+
+ if (bHaveStartPrepare)
+ {
+ IMG_UINT32 k;
+ /*
+				We're at the start of a transfer operation (which might be made
+				up of multiple HW operations) so check if we also have the
+				end of the transfer operation in the batch
+ */
+ for (k=i;k<ui32PrepareCount;k++)
+ {
+ if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END)
+ {
+ bHaveEndPrepare = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (!bHaveEndPrepare)
+ {
+ /*
+ We don't have the complete command passed in this call
+ so drop the update request. When we get called again with
+ the last HW command in this transfer operation we'll do
+ the update at that point.
+ */
+ for (k=0;k<paui32ServerSyncCount[i];k++)
+ {
+ papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ }
+ }
+ }
+ }
+
+
+ /*
+ Init the command helper commands for all the prepares
+ */
+ for (i=0;i<ui32PrepareCount;i++)
+ {
+ RGX_CLIENT_CCB *psClientCCB;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+ IMG_CHAR *pszCommandName;
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+ RGXFWIF_CCB_CMD_TYPE eType;
+ SYNC_ADDR_LIST *psSyncAddrListFence;
+ SYNC_ADDR_LIST *psSyncAddrListUpdate;
+
+ if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+ {
+ psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-3D";
+ psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+ }
+ else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) && \
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-2D";
+ psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_cmdtype;
+ }
+
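+		/* The first prepare in the batch determines the PDump flags; later prepares must match */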
+ if (i == 0)
+ {
+ ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+ psTransferContext->ui32PDumpFlags |= ui32PDumpFlags;
+ }
+ else
+ {
+ IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+ if (ui32NewPDumpFlags != ui32PDumpFlags)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous commands in a batch is not permitted", __FUNCTION__));
+ goto fail_pdumpcheck;
+ }
+ }
+
+ psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
+ ui32IntClientFenceCount = paui32ClientFenceCount[i];
+ eError = SyncAddrListPopulate(psSyncAddrListFence,
+ ui32IntClientFenceCount,
+ papauiClientFenceUFOSyncPrimBlock[i],
+ papaui32ClientFenceSyncOffset[i]);
+ if(eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+
+ paui32IntFenceValue = papaui32ClientFenceValue[i];
+ psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
+ ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+ eError = SyncAddrListPopulate(psSyncAddrListUpdate,
+ ui32IntClientUpdateCount,
+ papauiClientUpdateUFOSyncPrimBlock[i],
+ papaui32ClientUpdateSyncOffset[i]);
+ if(eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+ paui32IntUpdateValue = papaui32ClientUpdateValue[i];
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (ui32SyncPMRCount)
+ {
+ int err;
+
+ err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ &psAppendData);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to append buffer syncs (errno=%d)", __FUNCTION__, err));
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_sync_append;
+ }
+
+ pvr_buffer_sync_append_checks_get(psAppendData,
+ &ui32IntClientFenceCount,
+ &pauiIntFenceUFOAddress,
+ &paui32IntFenceValue);
+
+ pvr_buffer_sync_append_updates_get(psAppendData,
+ &ui32IntClientUpdateCount,
+ &pauiIntUpdateUFOAddress,
+ &paui32IntUpdateValue);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32CheckFenceFD >= 0 || i32UpdateTimelineFD >= 0)
+ {
+ eError =
+ pvr_sync_append_fences(szFenceName,
+ i32CheckFenceFD,
+ i32UpdateTimelineFD,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ &psFDFenceData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_syncinit;
+ }
+ pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+ &pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+ pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+ &pauiIntFenceUFOAddress, &paui32IntFenceValue);
+ }
+#endif
+
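+		/* Fetch the FW addresses used for pre/post command timestamps and the timer query update */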
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Create the command helper data for this command
+ */
+ eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ paui32ServerSyncCount[i],
+ papaui32ServerSyncFlags[i],
+ SYNC_FLAG_MASK_ALL,
+ papapsServerSyncs[i],
+ paui32FWCommandSize[i],
+ papaui8FWCommand[i],
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ eType,
+ ui32ExtJobRef,
+ ui32JobId,
+ ui32PDumpFlags,
+ NULL,
+ pszCommandName,
+ psCmdHelper);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+ }
+
+ /*
+ Acquire space for all the commands in one go
+ */
+ if (ui323DCmdCount)
+ {
+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+ &pas3DCmdHelper[0]);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdacquire;
+ }
+ }
+
+ if (ui322DCmdCount)
+ {
+ eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+ &pas2DCmdHelper[0]);
+ if (eError != PVRSRV_OK)
+ {
+ if (ui323DCmdCount)
+ {
+ ui323DCmdCount = 0;
+ ui322DCmdCount = 0;
+ }
+ else
+ {
+ goto fail_2dcmdacquire;
+ }
+ }
+ }
+
+ /*
+ We should acquire the kernel CCB(s) space here as the schedule could fail
+ and we would have to roll back all the syncs
+ */
+
+ /*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded
+ */
+ if (ui323DCmdCount)
+ {
+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+ &pas3DCmdHelper[0],
+ "TQ_3D",
+ FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+
+ }
+
+ if ((ui322DCmdCount) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+ &pas2DCmdHelper[0],
+ "TQ_2D",
+ FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+ }
+
+ /*
+ Even if we failed to acquire the client CCB space we might still need
+ to kick the HW to process a padding packet to release space for us next
+ time round
+ */
+ if (ui323DCmdCount)
+ {
+ RGXFWIF_KCCB_CMD s3DKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel 3D CCB command. */
+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ s3DKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ HTBLOGK(HTB_SF_MAIN_KICK_3D,
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui323DCmdOffset);
+ RGX_HWPERF_HOST_ENQ(psTransferContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_TQ3D);
+
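+		/* Submit the 3D kick to the firmware, retrying (up to a timeout) while it returns PVRSRV_ERROR_RETRY */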
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_3D,
+ &s3DKCCBCmd,
+ sizeof(s3DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TQ3D);
+#endif
+ }
+
+ if ((ui322DCmdCount) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ RGXFWIF_KCCB_CMD s2DKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 2D CCB command. */
+ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_2D,
+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui322DCmdOffset);
+ RGX_HWPERF_HOST_ENQ(psTransferContext, OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx, ui32ExtJobRef, ui32JobId,
+ RGX_HWPERF_KICK_TYPE_TQ2D);
+
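+		/* Submit the 2D kick, retrying on PVRSRV_ERROR_RETRY as in the 3D case */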
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_2D,
+ &s2DKCCBCmd,
+ sizeof(s2DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TQ2D);
+#endif
+ }
+
+ /*
+ * Now check eError (which may have returned an error from our earlier calls
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+ * so we check it now...
+ */
+ if (eError != PVRSRV_OK )
+ {
+ goto fail_2dcmdacquire;
+ }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ if (i32UpdateTimelineFD >= 0)
+ {
+ /* If we get here, this should never fail. Hitting that likely implies
+ * a code error above */
+ i32UpdateFenceFD = pvr_sync_get_update_fd(psFDFenceData);
+ if (i32UpdateFenceFD < 0)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get the update sync fd",
+ __FUNCTION__));
+			/* If we fail here, we cannot roll back the syncs as the HW already
+			 * has references to the resources they may be protecting in the kick,
+			 * so fall through */
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_free_append_data;
+ }
+ }
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+ /*
+ Free the merged sync memory if required
+ */
+ pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psAppendData)
+ {
+ pvr_buffer_sync_append_finish(psAppendData);
+ }
+#endif
+
+ *pi32UpdateFenceFD = i32UpdateFenceFD;
+
+ OSFreeMem(pas2DCmdHelper);
+ OSFreeMem(pas3DCmdHelper);
+
+ return PVRSRV_OK;
+
+/*
+ No resources are created in this function so there is nothing to free
+ unless we had to merge syncs.
+ If we fail after the client CCB acquire there is still nothing to do
+ as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+fail_initcmd:
+
+fail_pdumpcheck:
+fail_cmdtype:
+
+fail_populate_sync_addr_list:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+fail_syncinit:
+	/* Cleanup was relocated here because the loop could fail after the first
+	 * iteration at the above goto tags, at which point the psFDFenceData
+	 * memory would already have been allocated.
+	 */
+ pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+ pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+ OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+ if ((psTransferContext->s2DData.ui32Priority != ui32Priority) && \
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_2D);
+ if (eError != PVRSRV_OK)
+ {
+ if(eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ }
+ goto fail_2dcontext;
+ }
+ psTransferContext->s2DData.ui32Priority = ui32Priority;
+ }
+
+ if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_3D);
+ if (eError != PVRSRV_OK)
+ {
+ if(eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ }
+ goto fail_3dcontext;
+ }
+ psTransferContext->s3DData.ui32Priority = ui32Priority;
+ }
+
+ return PVRSRV_OK;
+
+fail_3dcontext:
+
+fail_2dcontext:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+ {
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+ (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && \
+ (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D;
+ }
+ }
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext))
+ {
+ if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED))
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Transfer queue Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Transfer queue Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTRANSFER_H__)
+#define __RGXTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXCreateTransferContextKM
+
+ @Description
+ Server-side implementation of RGXCreateTransferContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sMCUFenceAddr - MCU fence device virtual address
+ @Input ui32FrameworkCommandSize - size of the framework command, in bytes
+ @Input pabyFrameworkCommand - framework command data
+ @Input hMemCtxPrivData - memory context private data
+ @Out ppsTransferContext - created transfer context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sMCUFenceAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXSubmitTransferKM
+
+ @Description
+ Schedules one or more 2D or 3D HW commands on the firmware
+
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32PrepareCount,
+ IMG_UINT32 *paui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientFenceSyncOffset,
+ IMG_UINT32 **papaui32ClientFenceValue,
+ IMG_UINT32 *paui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientUpdateSyncOffset,
+ IMG_UINT32 **papaui32ClientUpdateValue,
+ IMG_UINT32 *paui32ServerSyncCount,
+ IMG_UINT32 **papaui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ***papapsServerSyncs,
+ IMG_INT32 i32CheckFenceFD,
+ IMG_INT32 i32UpdateTimelineFD,
+ IMG_INT32 *pi32UpdateFenceFD,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 *paui32FWCommandSize,
+ IMG_UINT8 **papaui8FWCommand,
+ IMG_UINT32 *pui32TQPrepareFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXTRANSFER_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 *pui32State)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ psDevInfo = psDeviceNode->pvDevice;
+ *pui32State = psDevInfo->eActivePMConf;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 ui32State)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ if (RGX_ACTIVEPM_FORCE_OFF != ui32State
+ || !psDevInfo->pvAPMISRData)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+#if !defined(NO_HARDWARE)
+ eError = OSUninstallMISR(psDevInfo->pvAPMISRData);
+ if (PVRSRV_OK == eError)
+ {
+ psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
+ psDevInfo->pvAPMISRData = NULL;
+ eError = PVRSRVSetDeviceDefaultPowerState(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON);
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL *pbEnabled)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ *pbEnabled = psDevInfo->bPDPEnabled;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL bEnable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ psDevInfo->bPDPEnabled = bEnable;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 *pui32DeviceFlags)
+{
+ if (!pui32DeviceFlags || !psDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui32DeviceFlags = psDevInfo->ui32DeviceFlags;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_BOOL bSetNotClear)
+{
+ IMG_UINT32 ui32DeviceFlags = 0;
+
+ if (!psDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_ZERO_FREELIST)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_ZERO_FREELIST;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN;
+ }
+
+ if (bSetNotClear)
+ {
+ psDevInfo->ui32DeviceFlags |= ui32DeviceFlags;
+ }
+ else
+ {
+ psDevInfo->ui32DeviceFlags &= ~ui32DeviceFlags;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ * RGXRunScript
+ */
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_INIT_COMMAND *psScript,
+ IMG_UINT32 ui32NumCommands,
+ IMG_UINT32 ui32PdumpFlags,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ IMG_UINT32 ui32PC;
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32LastLoopPoint = 0xFFFFFFFF;
+#endif /* NO_HARDWARE */
+
+ for (ui32PC = 0; ui32PC < ui32NumCommands; ui32PC++)
+ {
+ RGX_INIT_COMMAND *psComm = &psScript[ui32PC];
+
+ switch (psComm->eOp)
+ {
+ case RGX_INIT_OP_DBG_READ32_HW_REG:
+ {
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, psComm->sDBGReadHWReg.ui32Offset);
+ PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGReadHWReg.aszName, ui32RegVal);
+ break;
+ }
+ case RGX_INIT_OP_DBG_READ64_HW_REG:
+ {
+ IMG_UINT64 ui64RegVal;
+ ui64RegVal = OSReadHWReg64(psDevInfo->pvRegsBaseKM, psComm->sDBGReadHWReg.ui32Offset);
+ PVR_DUMPDEBUG_LOG("%s: 0x%016llX", psComm->sDBGReadHWReg.aszName, ui64RegVal);
+ break;
+ }
+ case RGX_INIT_OP_WRITE_HW_REG:
+ {
+ if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+ {
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+ }
+ PDUMPCOMMENT("RGXRunScript: Write HW reg operation");
+ PDUMPREG32(RGX_PDUMPREG_NAME,
+ psComm->sWriteHWReg.ui32Offset,
+ psComm->sWriteHWReg.ui32Value,
+ ui32PdumpFlags);
+ break;
+ }
+ case RGX_INIT_OP_PDUMP_HW_REG:
+ {
+ PDUMPCOMMENT("RGXRunScript: Dump HW reg operation");
+ PDUMPREG32(RGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset,
+ psComm->sPDumpHWReg.ui32Value, ui32PdumpFlags);
+ break;
+ }
+ case RGX_INIT_OP_COND_POLL_HW_REG:
+ {
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32RegVal;
+
+ if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+ {
+ /* read the register used as condition */
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, psComm->sCondPollHWReg.ui32CondOffset);
+
+ /* if the condition is met, poll the register */
+ if ((ui32RegVal & psComm->sCondPollHWReg.ui32CondMask) == psComm->sCondPollHWReg.ui32CondValue)
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sCondPollHWReg.ui32Offset),
+ psComm->sCondPollHWReg.ui32Value,
+ psComm->sCondPollHWReg.ui32Mask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Cond Poll for Reg (0x%x) failed -> Cancel script.", psComm->sCondPollHWReg.ui32Offset));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "RGXRunScript: Skipping Poll for Reg (0x%x) because the condition is not met (Reg 0x%x ANDed with mask 0x%x equal to 0x%x but value 0x%x found instead).",
+ psComm->sCondPollHWReg.ui32Offset,
+ psComm->sCondPollHWReg.ui32CondOffset,
+ psComm->sCondPollHWReg.ui32CondMask,
+ psComm->sCondPollHWReg.ui32CondValue,
+ ui32RegVal));
+ }
+ }
+#endif
+ break;
+ }
+ case RGX_INIT_OP_POLL_64_HW_REG:
+ {
+ /* Split lower and upper words */
+ IMG_UINT32 ui32UpperValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value >> 32);
+ IMG_UINT32 ui32LowerValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value);
+
+ IMG_UINT32 ui32UpperMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask >> 32);
+ IMG_UINT32 ui32LowerMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXRunScript: 64 bit HW offset: %x", psComm->sPoll64HWReg.ui32Offset);
+
+ if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)(((IMG_UINT8*)psDevInfo->pvRegsBaseKM) + psComm->sPoll64HWReg.ui32Offset + 4),
+ ui32UpperValue,
+ ui32UpperMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for upper part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ psComm->sPoll64HWReg.ui32Offset + 4,
+ ui32UpperValue,
+ ui32UpperMask,
+ ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPoll64HWReg.ui32Offset),
+ ui32LowerValue,
+ ui32LowerMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for lower part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ psComm->sPoll64HWReg.ui32Offset,
+ ui32LowerValue,
+ ui32LowerMask,
+ ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ break;
+ }
+ case RGX_INIT_OP_POLL_HW_REG:
+ {
+ if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPollHWReg.ui32Offset),
+ psComm->sPollHWReg.ui32Value,
+ psComm->sPollHWReg.ui32Mask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for Reg (0x%x) failed -> Cancel script.", psComm->sPollHWReg.ui32Offset));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ psComm->sPollHWReg.ui32Offset,
+ psComm->sPollHWReg.ui32Value,
+ psComm->sPollHWReg.ui32Mask,
+ ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ break;
+ }
+
+ case RGX_INIT_OP_LOOP_POINT:
+ {
+#if !defined(NO_HARDWARE)
+ ui32LastLoopPoint = ui32PC;
+#endif /* NO_HARDWARE */
+ break;
+ }
+
+ case RGX_INIT_OP_COND_BRANCH:
+ {
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+ psComm->sConditionalBranchPoint.ui32Offset);
+
+ if((ui32RegVal & psComm->sConditionalBranchPoint.ui32Mask) != psComm->sConditionalBranchPoint.ui32Value)
+ {
+ ui32PC = ui32LastLoopPoint - 1;
+ }
+#endif /* NO_HARDWARE */
+
+ PDUMPIDLWITHFLAGS(30, ui32PdumpFlags);
+ break;
+ }
+ case RGX_INIT_OP_DBG_CALC:
+ {
+ IMG_UINT32 ui32RegVal1;
+ IMG_UINT32 ui32RegVal2;
+ IMG_UINT32 ui32RegVal3;
+ ui32RegVal1 = OSReadHWReg32(psDevInfo->pvRegsBaseKM, psComm->sDBGCalc.ui32Offset1);
+ ui32RegVal2 = OSReadHWReg32(psDevInfo->pvRegsBaseKM, psComm->sDBGCalc.ui32Offset2);
+ ui32RegVal3 = OSReadHWReg32(psDevInfo->pvRegsBaseKM, psComm->sDBGCalc.ui32Offset3);
+ if (ui32RegVal1 + ui32RegVal2 > ui32RegVal3)
+ {
+ PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGCalc.aszName, ui32RegVal1 + ui32RegVal2 - ui32RegVal3);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGCalc.aszName, 0);
+ }
+ break;
+ }
+ case RGX_INIT_OP_DBG_WAIT:
+ {
+ OSWaitus(psComm->sDBGWait.ui32WaitInUs);
+ break;
+ }
+ case RGX_INIT_OP_DBG_STRING:
+ {
+ PVR_DUMPDEBUG_LOG("%s", psComm->sDBGString.aszString);
+ break;
+ }
+ case RGX_INIT_OP_HALT:
+ {
+ return PVRSRV_OK;
+ }
+ case RGX_INIT_OP_ILLEGAL:
+ /* FALLTHROUGH */
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+ }
+ }
+
+ }
+
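+ /* A well-formed script terminates with RGX_INIT_OP_HALT (handled above);
+ * falling out of the loop without a HALT is treated as an error.
+ */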
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+}
+
+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ /*
+ * This is based on the currently defined DMs.
+ * If you need to modify the enum in include/rgx_common.h
+ * please keep this function up-to-date too.
+ *
+ * typedef enum _RGXFWIF_DM_
+ * {
+ * RGXFWIF_DM_GP = 0,
+ * RGXFWIF_DM_2D = 1,
+ * RGXFWIF_DM_TDM = 1,
+ * RGXFWIF_DM_TA = 2,
+ * RGXFWIF_DM_3D = 3,
+ * RGXFWIF_DM_CDM = 4,
+ * RGXFWIF_DM_RTU = 5,
+ * RGXFWIF_DM_SHG = 6,
+ * RGXFWIF_DM_LAST,
+ * RGXFWIF_DM_FORCE_I32 = 0x7fffffff
+ * } RGXFWIF_DM;
+ */
+ PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST);
+
+ switch(eKickTypeDM) {
+ case RGX_KICK_TYPE_DM_GP:
+ return "GP ";
+ case RGX_KICK_TYPE_DM_TDM_2D:
+ return "TDM/2D ";
+ case RGX_KICK_TYPE_DM_TA:
+ return "TA ";
+ case RGX_KICK_TYPE_DM_3D:
+ return "3D ";
+ case RGX_KICK_TYPE_DM_CDM:
+ return "CDM ";
+ case RGX_KICK_TYPE_DM_RTU:
+ return "RTU ";
+ case RGX_KICK_TYPE_DM_SHG:
+ return "SHG ";
+ case RGX_KICK_TYPE_DM_TQ2D:
+ return "TQ2D ";
+ case RGX_KICK_TYPE_DM_TQ3D:
+ return "TQ3D ";
+ default:
+ return "Invalid DM ";
+ }
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Device specific utility routines declarations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Inline functions/structures specific to RGX
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function RGXQueryAPMState
+
+ @Description Query the state of the APM configuration
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Output pui32State : The APM configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 *pui32State);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetAPMState
+
+ @Description Set the APM configuration state. Currently only 'OFF' is
+ supported
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Input ui32State : The requested APM configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 ui32State);
+
+/*!
+******************************************************************************
+
+ @Function RGXQueryPdumpPanicEnable
+
+ @Description Get the PDump Panic Enable configuration state.
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Output pbEnabled : Set to IMG_TRUE if PDump Panic is enabled
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL *pbEnabled);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetPdumpPanicEnable
+
+ @Description Set the PDump Panic Enable flag
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Input bEnable : The requested configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL bEnable);
+
+/*!
+******************************************************************************
+
+ @Function RGXGetDeviceFlags
+
+ @Description Get the device flags for a given device
+
+ @Input psDevInfo : The device descriptor to query
+
+ @Output pui32DeviceFlags : The current state of the device flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 *pui32DeviceFlags);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetDeviceFlags
+
+ @Description Set the device flags for a given device
+
+ @Input psDevInfo : The device descriptor to modify
+
+ @Input ui32Config : The device flags to modify
+
+ @Input bSetNotClear : Set or clear the specified flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function RGXRunScript
+
+ @Description Execute the commands in the script
+
+ @Input psDevInfo : The device info structure
+
+ @Input psScript : Array of script commands to execute
+
+ @Input ui32NumCommands : Number of commands in the script
+
+ @Input ui32PdumpFlags : PDump flags applied to the register operations
+
+ @Input pfnDumpDebugPrintf : Optional debug print callback used by DBG commands
+
+ @Input pvDumpDebugFile : Private data passed to the debug print callback
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_INIT_COMMAND *psScript,
+ IMG_UINT32 ui32NumCommands,
+ IMG_UINT32 ui32PdumpFlags,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/*!
+******************************************************************************
+
+ @Function RGXStringifyKickTypeDM
+
+ @Description Returns the name of the given kick type DM as a string
+
+ @Input eKickTypeDM : Kick type DM
+
+ @Return Pointer to a constant string containing the DM name
+
+******************************************************************************/
+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+
+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
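+
+/*
+ * Illustrative (hypothetical) usage of the macro above, e.g. when logging
+ * which DMs were kicked; ui32KickFlags is an assumed bitmask of
+ * RGX_KICK_TYPE_DM_* values and is not defined by this header:
+ *
+ * PVR_LOG(("Kicked DMs: %s%s%s",
+ * RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickFlags, RGX_KICK_TYPE_DM_TA),
+ * RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickFlags, RGX_KICK_TYPE_DM_3D),
+ * RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickFlags, RGX_KICK_TYPE_DM_CDM)));
+ */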
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxworkest.c
+@Title RGX Workload Estimation Functionality
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel mode workload estimation functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxworkest.h"
+#include "rgxfwutils.h"
+#include "rgxdevice.h"
+#include "rgxpdvfs.h"
+#include "device.h"
+#include "pvr_debug.h"
+
+#define ROUND_DOWN_TO_NEAREST_1024(number) (((number) >> 10) << 10)
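+/* e.g. a deadline of 10500us rounds down to 10240us (10500 >> 10 == 10, 10 << 10 == 10240) */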
+
+void WorkEstRCInit(WORKEST_HOST_DATA *psWorkEstData)
+{
+ /* Create hash tables for workload matching */
+ psWorkEstData->sWorkloadMatchingDataTA.psWorkloadDataHash =
+ HASH_Create_Extended(WORKLOAD_HASH_SIZE,
+ sizeof(RGX_WORKLOAD_TA3D *),
+ &WorkEstHashFuncTA3D,
+ (HASH_KEY_COMP *)&WorkEstHashCompareTA3D);
+
+ /* Create a lock to protect the hash table */
+ WorkEstHashLockCreate(&(psWorkEstData->sWorkloadMatchingDataTA.psWorkEstHashLock));
+
+ psWorkEstData->sWorkloadMatchingData3D.psWorkloadDataHash =
+ HASH_Create_Extended(WORKLOAD_HASH_SIZE,
+ sizeof(RGX_WORKLOAD_TA3D *),
+ &WorkEstHashFuncTA3D,
+ (HASH_KEY_COMP *)&WorkEstHashCompareTA3D);
+
+ /* Create a lock to protect the hash tables */
+ WorkEstHashLockCreate(&(psWorkEstData->sWorkloadMatchingData3D.psWorkEstHashLock));
+}
+
+void WorkEstRCDeInit(WORKEST_HOST_DATA *psWorkEstData,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ HASH_TABLE *psWorkloadDataHash;
+ RGX_WORKLOAD_TA3D *pasWorkloadHashKeys;
+ RGX_WORKLOAD_TA3D *psWorkloadHashKey;
+ IMG_UINT32 ui32i;
+ IMG_UINT64 *paui64WorkloadCycleData;
+
+ pasWorkloadHashKeys = psWorkEstData->sWorkloadMatchingDataTA.asWorkloadHashKeys;
+ paui64WorkloadCycleData = psWorkEstData->sWorkloadMatchingDataTA.aui64HashCycleData;
+ psWorkloadDataHash = psWorkEstData->sWorkloadMatchingDataTA.psWorkloadDataHash;
+
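+ /* Tear down the TA workload hash: entries with non-zero cycle data are
+ * still live in the table, so remove them before deleting the table itself.
+ */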
+ if(psWorkloadDataHash)
+ {
+ for(ui32i = 0; ui32i < WORKLOAD_HASH_SIZE; ui32i++)
+ {
+ if(paui64WorkloadCycleData[ui32i] > 0)
+ {
+ psWorkloadHashKey = &pasWorkloadHashKeys[ui32i];
+ HASH_Remove_Extended(psWorkloadDataHash,
+ (uintptr_t*)&psWorkloadHashKey);
+ }
+ }
+
+ HASH_Delete(psWorkloadDataHash);
+ }
+
+ /* Remove the hash lock */
+ WorkEstHashLockDestroy(psWorkEstData->sWorkloadMatchingDataTA.psWorkEstHashLock);
+
+ pasWorkloadHashKeys = psWorkEstData->sWorkloadMatchingData3D.asWorkloadHashKeys;
+ paui64WorkloadCycleData = psWorkEstData->sWorkloadMatchingData3D.aui64HashCycleData;
+ psWorkloadDataHash = psWorkEstData->sWorkloadMatchingData3D.psWorkloadDataHash;
+
+ if(psWorkloadDataHash)
+ {
+ for(ui32i = 0; ui32i < WORKLOAD_HASH_SIZE; ui32i++)
+ {
+ if(paui64WorkloadCycleData[ui32i] > 0)
+ {
+ psWorkloadHashKey = &pasWorkloadHashKeys[ui32i];
+ HASH_Remove_Extended(psWorkloadDataHash,
+ (uintptr_t*)&psWorkloadHashKey);
+ }
+ }
+
+ HASH_Delete(psWorkloadDataHash);
+ }
+
+ /* Remove the hash lock */
+ WorkEstHashLockDestroy(psWorkEstData->sWorkloadMatchingData3D.psWorkEstHashLock);
+
+ return;
+}
+
+IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize,
+ void *pKey1,
+ void *pKey2)
+{
+ RGX_WORKLOAD_TA3D *psWorkload1;
+ RGX_WORKLOAD_TA3D *psWorkload2;
+
+ if(pKey1 && pKey2)
+ {
+ psWorkload1 = *((RGX_WORKLOAD_TA3D **)pKey1);
+ psWorkload2 = *((RGX_WORKLOAD_TA3D **)pKey2);
+
+ PVR_ASSERT(psWorkload1);
+ PVR_ASSERT(psWorkload2);
+
+ if(psWorkload1->ui32RenderTargetSize == psWorkload2->ui32RenderTargetSize
+ && psWorkload1->ui32NumberOfDrawCalls == psWorkload2->ui32NumberOfDrawCalls
+ && psWorkload1->ui32NumberOfIndices == psWorkload2->ui32NumberOfIndices
+ && psWorkload1->ui32NumberOfMRTs == psWorkload2->ui32NumberOfMRTs)
+ {
+ /* This is added to allow this memory to be freed */
+ *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1;
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
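+/*
+ * Bit-mixing step used to build the workload hash key: spreads each 32-bit
+ * input value across the hash so that similar workload characteristics are
+ * less likely to collide in the same bucket.
+ */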
+static inline IMG_UINT32 WorkEstDoHash(IMG_UINT32 ui32Input)
+{
+ IMG_UINT32 ui32HashPart;
+
+ ui32HashPart = ui32Input;
+ ui32HashPart += (ui32HashPart << 12);
+ ui32HashPart ^= (ui32HashPart >> 22);
+ ui32HashPart += (ui32HashPart << 4);
+ ui32HashPart ^= (ui32HashPart >> 9);
+ ui32HashPart += (ui32HashPart << 10);
+ ui32HashPart ^= (ui32HashPart >> 2);
+ ui32HashPart += (ui32HashPart << 7);
+ ui32HashPart ^= (ui32HashPart >> 12);
+
+ return ui32HashPart;
+}
+
+IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ RGX_WORKLOAD_TA3D *psWorkload = *((RGX_WORKLOAD_TA3D**)pKey);
+ IMG_UINT32 ui32HashKey = 0;
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+ PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+ ui32HashKey += WorkEstDoHash(psWorkload->ui32RenderTargetSize);
+ ui32HashKey += WorkEstDoHash(psWorkload->ui32NumberOfDrawCalls);
+ ui32HashKey += WorkEstDoHash(psWorkload->ui32NumberOfIndices);
+ ui32HashKey += WorkEstDoHash(psWorkload->ui32NumberOfMRTs);
+
+ return ui32HashKey;
+}
+
+PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo,
+ WORKEST_HOST_DATA *psWorkEstHostData,
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData)
+{
+ PVRSRV_ERROR eError;
+ RGX_WORKLOAD_TA3D *psWorkloadCharacteristics;
+ IMG_UINT64 *pui64CyclePrediction;
+ POS_LOCK psWorkEstHashLock;
+ IMG_UINT64 ui64WorkloadDeadlineInus = ui64DeadlineInus;
+ IMG_UINT64 ui64CurrentTime;
+ HASH_TABLE *psWorkloadDataHash;
+ WORKEST_RETURN_DATA *psReturnData;
+
+ if(psDevInfo == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"WorkEstPrepare: Device Info not available"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psDevInfo->bWorkEstEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return PVRSRV_OK;
+ }
+
+ if(psWorkEstHostData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstPrepare: Host data not available"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psWorkloadMatchingData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstPrepare: Workload Matching Data not available"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psWorkloadDataHash = psWorkloadMatchingData->psWorkloadDataHash;
+ if(psWorkloadDataHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"WorkEstPrepare: Hash Table not available"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psWorkEstHashLock = psWorkloadMatchingData->psWorkEstHashLock;
+ if(psWorkEstHashLock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstPrepare: Hash lock not available"
+ ));
+ eError = PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE;
+ return eError;
+ }
+
+ eError = OSClockMonotonicus64(&ui64CurrentTime);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstPrepare: Unable to access System Monotonic clock"));
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+ }
+
+#if defined(SUPPORT_PDVFS)
+ psDevInfo->psDeviceNode->psDevConfig->sDVFS.sPDVFSData.bWorkInFrame = IMG_TRUE;
+#endif
+
+ /* Set up data for the return path to process the workload */
+
+ /* Any host side data needed for the return path is stored in an array and
+ * only the array's index is passed to and from the firmware. This is a
+ * similar abstraction to using handles but is optimised for this case.
+ */
+ psReturnData =
+ &psDevInfo->asReturnData[psDevInfo->ui32ReturnDataWO];
+
+ /* The index for the specific data is passed to the FW */
+ psWorkEstKickData->ui64ReturnDataIndex = psDevInfo->ui32ReturnDataWO;
+
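+ /* Advance the write offset, wrapping within the return-data array
+ * (the mask form implies RETURN_DATA_ARRAY_SIZE is a power of two).
+ */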
+ psDevInfo->ui32ReturnDataWO =
+ (psDevInfo->ui32ReturnDataWO + 1) & RETURN_DATA_ARRAY_WRAP_MASK;
+
+ /* The workload characteristics are needed in the return data for the
+ * matching of future workloads via the hash.
+ */
+ psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics;
+ psWorkloadCharacteristics->ui32RenderTargetSize = ui32RenderTargetSize;
+ psWorkloadCharacteristics->ui32NumberOfDrawCalls = ui32NumberOfDrawCalls;
+ psWorkloadCharacteristics->ui32NumberOfIndices = ui32NumberOfIndices;
+ psWorkloadCharacteristics->ui32NumberOfMRTs = ui32NumberOfMRTs;
+
+ /* The matching data is needed as it holds the hash data. */
+ psReturnData->psWorkloadMatchingData = psWorkloadMatchingData;
+
+ /* The host data for the completion updates */
+ psReturnData->psWorkEstHostData = psWorkEstHostData;
+ if(ui64WorkloadDeadlineInus > ui64CurrentTime)
+ {
+ /* This is rounded to reduce multiple deadlines with a minor spread
+ * flooding the fw workload array.
+ */
+ psWorkEstKickData->ui64DeadlineInus =
+ ROUND_DOWN_TO_NEAREST_1024(ui64WorkloadDeadlineInus);
+ }
+ else
+ {
+ /* If the deadline has already passed, set it to zero to suggest full
+ * frequency
+ */
+ psWorkEstKickData->ui64DeadlineInus = 0;
+ }
+
+ /* Acquire the lock to access hash */
+ OSLockAcquire(psWorkEstHashLock);
+
+ /* Check if there is a prediction for this workload */
+ pui64CyclePrediction =
+ (IMG_UINT64*) HASH_Retrieve(psWorkloadDataHash,
+ (uintptr_t)psWorkloadCharacteristics);
+
+ /* Release lock */
+ OSLockRelease(psWorkEstHashLock);
+
+ if(pui64CyclePrediction != NULL)
+ {
+ /* Cycle prediction is available, store this prediction */
+ psWorkEstKickData->ui64CyclesPrediction = *pui64CyclePrediction;
+ }
+ else
+ {
+ /* There is no prediction */
+ psWorkEstKickData->ui64CyclesPrediction = 0;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR WorkEstWorkloadFinished(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd)
+{
+ RGX_WORKLOAD_TA3D *psWorkloadCharacteristics;
+ RGX_WORKLOAD_TA3D *pasWorkloadHashKeys;
+ IMG_UINT64 *paui64HashCycleData;
+ IMG_UINT32 *pui32HashArrayWO;
+ RGX_WORKLOAD_TA3D *psWorkloadHashKey;
+ IMG_UINT64 *pui64CyclesTaken;
+ HASH_TABLE *psWorkloadHash;
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData;
+ POS_LOCK psWorkEstHashLock;
+ IMG_BOOL bHashSuccess;
+ WORKEST_RETURN_DATA *psReturnData;
+ WORKEST_HOST_DATA *psWorkEstHostData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(psDevInfo->bWorkEstEnabled != IMG_TRUE)
+ {
+ /* No error message to avoid excessive messages */
+ return PVRSRV_OK;
+ }
+
+ if(psReturnCmd == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Missing Return Command"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psReturnCmd->ui64ReturnDataIndex >= RETURN_DATA_ARRAY_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Handle Reference Out of Bounds"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Retrieve the return data for this workload */
+ psReturnData = &psDevInfo->asReturnData[psReturnCmd->ui64ReturnDataIndex];
+
+ psWorkEstHostData = psReturnData->psWorkEstHostData;
+
+ if(psWorkEstHostData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Missing host data"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+
+ psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics;
+
+ if(psWorkloadCharacteristics == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Missing workload characteristics"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto hasherror;
+ }
+
+ psWorkloadMatchingData = psReturnData->psWorkloadMatchingData;
+
+ psWorkloadHash = psWorkloadMatchingData->psWorkloadDataHash;
+ if(psWorkloadHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Missing hash"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
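+ /* Only forcing APM off is supported, and only while APM is currently
+ * active (i.e. an APM MISR has been installed).
+ */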
+ goto hasherror;
+ }
+
+ psWorkEstHashLock = psWorkloadMatchingData->psWorkEstHashLock;
+ if(psWorkEstHashLock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WorkEstFinished: Missing hash lock"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto hasherror;
+ }
+
+ OSLockAcquire(psWorkEstHashLock);
+
+ pui64CyclesTaken =
+ (IMG_UINT64*) HASH_Remove_Extended(psWorkloadHash,
+ (uintptr_t*)&psWorkloadCharacteristics);
+
+ pui32HashArrayWO = &(psWorkloadMatchingData->ui32HashArrayWO);
+ paui64HashCycleData = psWorkloadMatchingData->aui64HashCycleData;
+ pasWorkloadHashKeys = psWorkloadMatchingData->asWorkloadHashKeys;
+
+ /* Remove the oldest hash entry before its slot is overwritten */
+ if(paui64HashCycleData[*pui32HashArrayWO] > 0)
+ {
+ psWorkloadHashKey = &pasWorkloadHashKeys[*pui32HashArrayWO];
+ HASH_Remove_Extended(psWorkloadHash,
+ (uintptr_t*)&psWorkloadHashKey);
+ }
+
+ if(pui64CyclesTaken == NULL)
+ {
+ /* There is no existing entry for these characteristics. */
+ pasWorkloadHashKeys[*pui32HashArrayWO] = *psWorkloadCharacteristics;
+
+ paui64HashCycleData[*pui32HashArrayWO] = psReturnCmd->ui64CyclesTaken;
+ }
+ else
+ {
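+ /* An entry already exists for these characteristics: blend the stored
+ * prediction with the latest measurement (simple two-point average).
+ */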
+ *pui64CyclesTaken =
+ (*pui64CyclesTaken + psReturnCmd->ui64CyclesTaken)/2;
+
+ pasWorkloadHashKeys[*pui32HashArrayWO] = *psWorkloadCharacteristics;
+
+ paui64HashCycleData[*pui32HashArrayWO] = *pui64CyclesTaken;
+
+ /* Set the old value to 0 so it is known to be invalid */
+ *pui64CyclesTaken = 0;
+ }
+
+
+ bHashSuccess = HASH_Insert((HASH_TABLE*)(psWorkloadHash),
+ (uintptr_t)&pasWorkloadHashKeys[*pui32HashArrayWO],
+ (uintptr_t)&paui64HashCycleData[*pui32HashArrayWO]);
+ PVR_ASSERT(bHashSuccess);
+
+ if(*pui32HashArrayWO == WORKLOAD_HASH_SIZE-1)
+ {
+ *pui32HashArrayWO = 0;
+ }
+ else
+ {
+ (*pui32HashArrayWO)++;
+ }
+
+ OSLockRelease(psWorkEstHashLock);
+
+hasherror:
+
+ /* Update the received counter so that the FW can check whether all the
+ * workloads associated with a render context have finished.
+ */
+ psWorkEstHostData->ui32WorkEstCCBReceived++;
+ return eError;
+}
+
+void WorkEstHashLockCreate(POS_LOCK *psWorkEstHashLock)
+{
+ if(*psWorkEstHashLock == NULL)
+ {
+ OSLockCreate(psWorkEstHashLock, LOCK_TYPE_DISPATCH);
+ }
+ return;
+}
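+ /* Translate the externally-visible RGXKMIF_* config bits into the
+ * corresponding internal RGXKM_* device flag bits.
+ */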
+
+void WorkEstHashLockDestroy(POS_LOCK sWorkEstHashLock)
+{
+ if(sWorkEstHashLock != NULL)
+ {
+ OSLockDestroy(sWorkEstHashLock);
+ sWorkEstHashLock = NULL;
+ }
+ return;
+}
+
+void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_WORKEST_FWCCB_CMD *psFwCCBCmd;
+
+ RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
+ IMG_UINT8 *psFWCCB = psDevInfo->psWorkEstFirmwareCCB;
+
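+ /* Drain all workload-finished commands the firmware has written to the
+ * workload estimation CCB, updating the prediction hash for each one.
+ */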
+ while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+ {
+ /* Point to the next command */
+ psFwCCBCmd = ((RGXFWIF_WORKEST_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+ WorkEstWorkloadFinished(psDevInfo, psFwCCBCmd);
+
+ /* Update read offset */
+ psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+ }
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxworkest.h
+@Title RGX Workload Estimation Functionality
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the kernel mode workload estimation functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXWORKEST_H
+#define RGXWORKEST_H
+
+#include "img_types.h"
+#include "hash.h"
+#include "rgxta3d.h"
+
+
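+/*
+ * Typical usage (a sketch, inferred from the functions below): WorkEstRCInit()
+ * is called when a render context is created, WorkEstPrepare() before each
+ * kick to attach a deadline and cycle prediction, WorkEstCheckFirmwareCCB()
+ * when the firmware signals completed workloads, and WorkEstRCDeInit() at
+ * context teardown.
+ */
+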
+IMG_INTERNAL
+void WorkEstRCInit(WORKEST_HOST_DATA *psWorkEstData);
+
+IMG_INTERNAL
+void WorkEstRCDeInit(WORKEST_HOST_DATA *psWorkEstData,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
+IMG_INTERNAL
+PVRSRV_ERROR WorkEstEmptyWorkloadHash( HASH_TABLE* psHash,
+ uintptr_t k,
+ uintptr_t v);
+
+IMG_INTERNAL
+IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize,
+ void *pKey1,
+ void *pKey2);
+
+IMG_INTERNAL
+IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+IMG_INTERNAL
+PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo,
+ WORKEST_HOST_DATA *psWorkEstHostData,
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData);
+
+IMG_INTERNAL
+PVRSRV_ERROR WorkEstWorkloadFinished(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd);
+
+IMG_INTERNAL
+void WorkEstHashLockCreate(POS_LOCK *psWorkEstHashLock);
+
+IMG_INTERNAL
+void WorkEstHashLockDestroy(POS_LOCK sWorkEstHashLock);
+
+IMG_INTERNAL
+void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXWORKEST_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File ri_server.c
+@Title Resource Information (RI) server implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Resource Information (RI) server functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#include <stdarg.h>
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+#if defined(PVR_RI_DEBUG)
+
+#define USE_RI_LOCK 1
+
+/*
+ * Initial size used for the Hash table.
+ * (Used to index the RI list entries).
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE 64
+
+/*
+ * Values written to the 'valid' field of
+ * RI structures when created and cleared
+ * prior to being destroyed.
+ * The code can then check this value
+ * before accessing the provided pointer
+ * contents as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY 0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77
+#define _INVALID 0x00000000
+
+/*
+ * If this define is set to 1, details of
+ * the linked lists (addresses, prev/next
+ * ptrs, etc) are also output when function
+ * RIDumpList() is called
+ */
+#define _DUMP_LINKEDLIST_INFO 0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+/*
+ * Length of string used for process name
+ */
+#define TASK_COMM_LEN 16
+/*
+ * Length of string used for process ID
+ */
+#define TASK_PID_LEN 11
+/*
+ * Length of string used for "[{PID}:_{process_name}]"
+ */
+#define RI_PROC_TAG_CHAR_LEN (1+TASK_PID_LEN+2+TASK_COMM_LEN+1)
+
+/*
+ * Length of string used for address
+ */
+#define RI_ADDR_CHAR_LEN 12
+/*
+ * Length of string used for size
+ */
+#define RI_SIZE_CHAR_LEN 12
+/*
+ * Length of string used for "{Imported from PID nnnnnnnnnn}"
+ */
+#define RI_IMPORT_TAG_CHAR_LEN 32
+/*
+ * Total length of string returned to debugfs
+ * {0xaddr}_{annotation_text}_{0xsize}_{import_tag}
+ */
+#define RI_MAX_DEBUGFS_ENTRY_LEN (RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_SIZE_CHAR_LEN+1+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ * Total length of string output to _RIOutput()
+ * for MEMDESC RI sub-list entries
+ * {0xaddr}_{annotation_text}_[{PID}:_{process_name}]_{0xsize}_bytes_{import_tag}
+ */
+#define RI_MAX_MEMDESC_RI_ENTRY_LEN (RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_PROC_TAG_CHAR_LEN+1+RI_SIZE_CHAR_LEN+7+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ * Total length of string output to _RIOutput()
+ * for PMR RI list entries
+ * {annotation_text}_{pmr_handle}_suballocs:{num_suballocs}_{0xsize}
+ */
+#define RI_MAX_PMR_RI_ENTRY_LEN (RI_MAX_TEXT_LEN+1+RI_ADDR_CHAR_LEN+11+10+1+RI_SIZE_CHAR_LEN)
+
+
+/*
+ * Structure used to make linked sublist of
+ * memory allocations (MEMDESC)
+ */
+struct _RI_SUBLIST_ENTRY_
+{
+ DLLIST_NODE sListNode;
+ struct _RI_LIST_ENTRY_ *psRI;
+ IMG_UINT32 valid;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsExportable;
+ IMG_BOOL bIsPinned;
+ IMG_PID pid;
+ IMG_CHAR ai8ProcName[TASK_COMM_LEN];
+ IMG_DEV_VIRTADDR sVAddr;
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_UINT64 ui64BackedSize;
+ IMG_CHAR ai8TextB[RI_MAX_TEXT_LEN+1];
+ DLLIST_NODE sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of
+ * PMRs. Sublists of allocations (MEMDESCs) made
+ * from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+ DLLIST_NODE sListNode;
+ DLLIST_NODE sSubListFirst;
+ IMG_UINT32 valid;
+ PMR *hPMR;
+ IMG_UINT64 ui64LogicalSize;
+ IMG_PID pid;
+ IMG_CHAR ai8ProcName[TASK_COMM_LEN];
+ IMG_CHAR ai8TextA[RI_MAX_TEXT_LEN+1];
+ IMG_UINT16 ui16SubListCount;
+ IMG_UINT16 ui16MaxSubListCount;
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16 g_ui16RICount = 0;
+static HASH_TABLE *g_pRIHashTable = NULL;
+static IMG_UINT16 g_ui16ProcCount = 0;
+static HASH_TABLE *g_pProcHashTable = NULL;
+
+static POS_LOCK g_hRILock;
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry
+ * is deleted, i.e. if RIDeInitKM() has already been called before that point
+ * but the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ * Used as head of linked-list of PMR RI entries -
+ * this is useful when we wish to iterate all PMR
+ * list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DumpList(PMR *hPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+ IMG_UINT32 ui;
+ IMG_UINT32 uHashKey = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ IMG_UINT32 uHashPart = *p++;
+
+ uHashPart += (uHashPart << 12);
+ uHashPart ^= (uHashPart >> 22);
+ uHashPart += (uHashPart << 4);
+ uHashPart ^= (uHashPart >> 9);
+ uHashPart += (uHashPart << 10);
+ uHashPart ^= (uHashPart >> 2);
+ uHashPart += (uHashPart << 7);
+ uHashPart ^= (uHashPart >> 12);
+
+ uHashKey += uHashPart;
+ }
+
+ return uHashKey;
+}
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2);
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2)
+{
+ IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+ IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+ IMG_UINT32 ui;
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ if (*p1++ != *p2++)
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+static void _RILock(void)
+{
+#if (USE_RI_LOCK == 1)
+ OSLockAcquire(g_hRILock);
+#endif
+}
+
+static void _RIUnlock(void)
+{
+#if (USE_RI_LOCK == 1)
+ OSLockRelease(g_hRILock);
+#endif
+}
+
+PVRSRV_ERROR RIInitKM(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ bRIDeInitDeferred = IMG_FALSE;
+#if (USE_RI_LOCK == 1)
+ eError = OSLockCreate(&g_hRILock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSLockCreate failed (returned %d)",__func__,eError));
+ }
+#endif
+ return eError;
+}
+void RIDeInitKM(void)
+{
+#if (USE_RI_LOCK == 1)
+ if (g_ui16RICount > 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: called with %d entries remaining - deferring OSLockDestroy()",__func__,g_ui16RICount));
+ bRIDeInitDeferred = IMG_TRUE;
+ }
+ else
+ {
+ OSLockDestroy(g_hRILock);
+ }
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function RIWritePMREntryKM
+
+ @Description
+ Writes a new Resource Information list entry.
+ The new entry will be inserted at the head of the list of
+ PMR RI entries and assigned the values provided.
+
+ @input hPMR - Reference (handle) to the PMR to which this reference relates
+ @input ui32TextASize - Length of the annotation string
+ @input psz8TextA - String describing this PMR (may be null)
+ @input ui64LogicalSize - Size of the PMR
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *hPMR,
+ IMG_UINT32 ui32TextASize,
+ const IMG_CHAR *psz8TextA,
+ IMG_UINT64 ui64LogicalSize)
+{
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = hPMR;
+ IMG_PCHAR pszText = (IMG_PCHAR)psz8TextA;
+ RI_LIST_ENTRY *psRIEntry = NULL;
+
+
+ /* if Hash table has not been created, create it now */
+ if (!g_pRIHashTable)
+ {
+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+ }
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ /* Error - no memory to allocate for Hash table(s) */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ if (!hPMR)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ /* Acquire RI Lock */
+ _RILock();
+
+ /* look-up hPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ if (!psRIEntry)
+ {
+ /*
+ * If failed to find a matching existing entry, create a new one
+ */
+ psRIEntry = (RI_LIST_ENTRY *)OSAllocZMem(sizeof(RI_LIST_ENTRY));
+ if (!psRIEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ /*
+ * Add new RI Entry
+ */
+ if (g_ui16RICount == 0)
+ {
+ /* Initialise PMR entry linked-list head */
+ dllist_init(&sListFirst);
+ }
+ g_ui16RICount++;
+
+ dllist_init (&(psRIEntry->sSubListFirst));
+ psRIEntry->ui16SubListCount = 0;
+ psRIEntry->ui16MaxSubListCount = 0;
+ psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+ psRIEntry->pid = OSGetCurrentClientProcessIDKM();
+ OSSNPrintf((IMG_CHAR *)psRIEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+ /* Add PMR entry to linked-list of PMR entries */
+ dllist_init (&(psRIEntry->sListNode));
+ dllist_add_to_tail(&sListFirst,(PDLLIST_NODE)&(psRIEntry->sListNode));
+ }
+
+ if (pszText)
+ {
+ if (ui32TextASize > RI_MAX_TEXT_LEN)
+ ui32TextASize = RI_MAX_TEXT_LEN;
+
+ /* copy ai8TextA field data */
+ OSSNPrintf((IMG_CHAR *)psRIEntry->ai8TextA, ui32TextASize+1, "%s", pszText);
+
+ /* ensure string is NUL-terminated */
+ psRIEntry->ai8TextA[ui32TextASize] = '\0';
+ }
+ else
+ {
+ /* ensure string is NUL-terminated */
+ psRIEntry->ai8TextA[0] = '\0';
+ }
+ psRIEntry->hPMR = hPMR;
+ psRIEntry->ui64LogicalSize = ui64LogicalSize;
+
+ /* Create index entry in Hash Table */
+ HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
+
+ /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
+ PMRStoreRIHandle(hPMR, psRIEntry);
+ }
+ /* Release RI Lock */
+ _RIUnlock();
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIWriteMEMDESCEntryKM
+
+ @Description
+ Writes a new Resource Information sublist entry.
+ The new entry will be inserted at the head of the sublist of
+ the indicated PMR list entry, and assigned the values provided.
+
+ @input hPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input ui32TextBSize - Length of the annotation string
+ @input psz8TextB - String describing this secondary reference (may be null)
+ @input ui64Offset - Offset from the start of the PMR at which this allocation begins
+ @input ui64Size - Size of this allocation
+ @input ui64BackedSize - The portion of ui64Size that is actually physically backed
+ @input bIsImport - Flag indicating if this is an allocation or an import
+ @input bIsExportable - Flag indicating if this allocation is exportable
+ @output phRIHandle - Handle to the created RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *hPMR,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsExportable,
+ RI_HANDLE *phRIHandle)
+{
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = hPMR;
+ IMG_PID pid;
+ IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+
+ /* check Hash tables have been created (meaning at least one PMR has been defined) */
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (!hPMR || !phRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ /* Acquire RI Lock */
+ _RILock();
+
+ *phRIHandle = NULL;
+
+ /* look-up hPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ if (!psRIEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMem(sizeof(RI_SUBLIST_ENTRY));
+ if (!psRISubEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI sublist entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ /*
+ * Insert new entry in sublist
+ */
+ PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+ /*
+ * Insert new entry before currentNode
+ */
+ if (!currentNode)
+ {
+ currentNode = &(psRIEntry->sSubListFirst);
+ }
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+ psRISubEntry->psRI = psRIEntry;
+
+ /* Increment number of entries in sublist */
+ psRIEntry->ui16SubListCount++;
+ if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+ {
+ psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+ }
+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+ }
+
+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+ if (ui32TextBSize > RI_MAX_TEXT_LEN)
+ ui32TextBSize = RI_MAX_TEXT_LEN;
+ /* copy ai8TextB field data */
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+ /* ensure string is NUL-terminated */
+ psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+ psRISubEntry->ui64Offset = ui64Offset;
+ psRISubEntry->ui64Size = ui64Size;
+ psRISubEntry->ui64BackedSize = ui64BackedSize;
+ psRISubEntry->bIsImport = bIsImport;
+ psRISubEntry->bIsExportable = bIsExportable;
+ psRISubEntry->bIsPinned = IMG_TRUE;
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+ dllist_init (&(psRISubEntry->sProcListNode));
+
+ /*
+ * Now insert this MEMDESC into the proc list
+ */
+ /* look-up pid in Hash Table */
+ pid = psRISubEntry->pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if (!hashData)
+ {
+ /*
+ * No allocations for this pid yet
+ */
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+ /* Increment number of entries in proc hash table */
+ g_ui16ProcCount++;
+ }
+ else
+ {
+ /*
+ * Insert allocation into pid allocations linked list
+ */
+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+ /*
+ * Insert new entry
+ */
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+ }
+ *phRIHandle = (RI_HANDLE)psRISubEntry;
+ /* Release RI Lock */
+ _RIUnlock();
+ }
+ return PVRSRV_OK;
+}
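+
+/*
+ * Illustrative sketch only (not part of the driver): a caller that has
+ * already registered a PMR would typically record a MemDesc against it,
+ * update the device virtual address once the MemDesc is mapped, and remove
+ * the entry again on free. The names hHypotheticalPMR, hHypotheticalRI and
+ * sHypotheticalDevVAddr are hypothetical, as are the size/offset values.
+ *
+ *     RI_HANDLE hHypotheticalRI = NULL;
+ *
+ *     if (RIWriteMEMDESCEntryKM(hHypotheticalPMR,
+ *                               sizeof("SubAlloc") - 1, "SubAlloc",
+ *                               ui64Offset, ui64Size, ui64BackedSize,
+ *                               IMG_FALSE, IMG_FALSE,
+ *                               &hHypotheticalRI) == PVRSRV_OK)
+ *     {
+ *         RIUpdateMEMDESCAddrKM(hHypotheticalRI, sHypotheticalDevVAddr);
+ *         ...
+ *         RIDeleteMEMDESCEntryKM(hHypotheticalRI);
+ *     }
+ */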
+
+/*!
+******************************************************************************
+
+ @Function RIWriteProcListEntryKM
+
+ @Description
+ Write a new entry into the process list directly. This is needed
+ because an entry may be associated with no PMR handle, with several
+ PMR handles, or with PMR handles that change over time.
+
+ In the common case a PMR is added to the PMR list and one or more
+ MemDescs associated with it are kept in its sub-list. These MemDescs
+ are additionally inserted into the per-process list.
+
+ Special descriptors (e.g. from new user APIs) may be associated with
+ no PMR, or with more than one. Such descriptors can be added directly
+ to the per-process list (as RI_SUBLIST_ENTRY) with this function; they
+ will not appear in the PMR list (RI_LIST_ENTRY) because there may be
+ no PMR to attach them to.
+
+ To remove entries from the per-process list, use
+ RIDeleteMEMDESCEntryKM().
+
+ @input ui32TextBSize - Length of the string in psz8TextB
+ @input psz8TextB - String describing this secondary reference (may be null)
+ @input ui64Size - Size of this allocation
+ @input ui64BackedSize - Number of bytes of ui64Size which are physically backed
+ @input ui64DevVAddr - Device virtual address of this entry
+ @output phRIHandle - Handle to the created RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_UINT64 ui64DevVAddr,
+ RI_HANDLE *phRIHandle)
+{
+ uintptr_t hashData = 0;
+ IMG_PID pid;
+ IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!g_pRIHashTable)
+ {
+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ /* Error - no memory to allocate for Hash table(s) */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ /* Acquire RI Lock */
+ _RILock();
+
+ *phRIHandle = NULL;
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMem(sizeof(RI_SUBLIST_ENTRY));
+ if (!psRISubEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI sublist entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+
+
+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+ if (ui32TextBSize > RI_MAX_TEXT_LEN)
+ ui32TextBSize = RI_MAX_TEXT_LEN;
+ /* copy ai8TextB field data */
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+ /* ensure string is NUL-terminated */
+ psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+ psRISubEntry->ui64Offset = 0;
+ psRISubEntry->ui64Size = ui64Size;
+ psRISubEntry->ui64BackedSize = ui64BackedSize;
+ psRISubEntry->sVAddr.uiAddr = ui64DevVAddr;
+ psRISubEntry->bIsImport = IMG_FALSE;
+ psRISubEntry->bIsExportable = IMG_FALSE;
+ psRISubEntry->bIsPinned = IMG_TRUE;
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+ dllist_init (&(psRISubEntry->sProcListNode));
+
+ /*
+ * Now insert this MEMDESC into the proc list
+ */
+ /* look-up pid in Hash Table */
+ pid = psRISubEntry->pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if (!hashData)
+ {
+ /*
+ * No allocations for this pid yet
+ */
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+ /* Increment number of entries in proc hash table */
+ g_ui16ProcCount++;
+ }
+ else
+ {
+ /*
+ * Insert allocation into pid allocations linked list
+ */
+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+ /*
+ * Insert new entry
+ */
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+ }
+ *phRIHandle = (RI_HANDLE)psRISubEntry;
+ /* Release RI Lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
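+
+/*
+ * Illustrative sketch only (not part of the driver): a server-side caller
+ * that tracks a range with no single backing PMR would pair
+ * RIWriteProcListEntryKM() with RIDeleteMEMDESCEntryKM() roughly as below.
+ * The names hHypotheticalRI and sHypotheticalDevVAddr, and the size values,
+ * are hypothetical.
+ *
+ *     RI_HANDLE hHypotheticalRI = NULL;
+ *
+ *     if (RIWriteProcListEntryKM(sizeof("SparseRange") - 1, "SparseRange",
+ *                                ui64Size, ui64BackedSize,
+ *                                sHypotheticalDevVAddr.uiAddr,
+ *                                &hHypotheticalRI) == PVRSRV_OK)
+ *     {
+ *         ...
+ *         RIDeleteMEMDESCEntryKM(hHypotheticalRI);
+ *     }
+ */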
+
+/*!
+******************************************************************************
+
+ @Function RIUpdateMEMDESCAddrKM
+
+ @Description
+ Update a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be updated
+ @input sVAddr - New device virtual address for the RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sVAddr)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+ /* Release RI lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIUpdateMEMDESCPinningKM
+
+ @Description
+ Update a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be updated
+ @input bIsPinned - The new pinning state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCPinningKM(RI_HANDLE hRIHandle,
+ IMG_BOOL bIsPinned)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ psRISubEntry->bIsPinned = bIsPinned;
+
+ /* Release RI lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIUpdateMEMDESCBackingKM
+
+ @Description
+ Update a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be updated
+ @input iSizeAdjustment - The change in backed physical memory for this
+ allocation, in bytes
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCBackingKM(RI_HANDLE hRIHandle,
+ IMG_INT32 iSizeAdjustment)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ psRISubEntry->ui64BackedSize += iSizeAdjustment;
+
+ /* Release RI lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDeletePMREntryKM
+
+ @Description
+ Delete a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ PMR *pPMRHashKey;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+ if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psRIEntry->ui16SubListCount == 0)
+ {
+ /* Acquire RI lock*/
+ _RILock();
+
+ /* Remove the HASH table index entry */
+ pPMRHashKey = psRIEntry->hPMR;
+ HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey);
+
+ psRIEntry->valid = _INVALID;
+
+ /* Remove PMR entry from linked-list of PMR entries */
+ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+ /* Now, free the memory used to store the RI entry */
+ OSFreeMem(psRIEntry);
+ psRIEntry = NULL;
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ /*
+ * Decrement number of RI entries - if this is now zero,
+ * we can delete the RI hash table
+ */
+ if(--g_ui16RICount == 0)
+ {
+ HASH_Delete(g_pRIHashTable);
+ g_pRIHashTable = NULL;
+ /* If deInit has been deferred, we can now destroy the RI Lock */
+ if (bRIDeInitDeferred)
+ {
+ OSLockDestroy(g_hRILock);
+ }
+ }
+ /*
+ * Make the handle NULL once PMR RI entry is deleted
+ */
+ hRIHandle = NULL;
+ }
+ else
+ {
+ eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+ }
+ }
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDeleteMEMDESCEntryKM
+
+ @Description
+ Delete a Resource Information entry.
+ Entry can be from RIEntry list or ProcList.
+
+ @input hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ uintptr_t hashData = 0;
+ IMG_PID pid;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ /* For entries which do have a parent PMR remove the node from the sublist */
+ if (psRISubEntry->psRI)
+ {
+ psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+ /* Now, remove entry from the sublist */
+ dllist_remove_node(&(psRISubEntry->sListNode));
+ }
+
+ psRISubEntry->valid = _INVALID;
+
+ /* Remove the entry from the proc allocations linked list */
+ pid = psRISubEntry->pid;
+ /* If this is the only allocation for this pid, just remove it from the hash table */
+ if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL)
+ {
+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+ /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+ if(--g_ui16ProcCount == 0)
+ {
+ HASH_Delete(g_pProcHashTable);
+ g_pProcHashTable = NULL;
+ }
+ }
+ else
+ {
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+ {
+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+ }
+ }
+ dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+ /* Now, free the memory used to store the sublist entry */
+ OSFreeMem(psRISubEntry);
+ psRISubEntry = NULL;
+
+ /*
+ * Decrement number of entries in sublist if this MemDesc had a parent entry.
+ */
+ if (psRIEntry)
+ {
+ psRIEntry->ui16SubListCount--;
+ }
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ /*
+ * Make the handle NULL once MEMDESC RI entry is deleted
+ */
+ hRIHandle = NULL;
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDeleteListKM
+
+ @Description
+ Delete all Resource Information entries and free associated
+ memory.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(void)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+ if (g_pRIHashTable)
+ {
+ eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries);
+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ {
+ /*
+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+ * the hash table gets deleted as a result of deleting the final PMR entry,
+ * so this is not a real error condition...
+ */
+ eResult = PVRSRV_OK;
+ }
+ }
+
+ /* After iterating the RIHashTable that holds the PMR entries, there may still
+ * be entries left in the per-process hash table, because they were added with
+ * RIWriteProcListEntryKM() and have no parent PMR associated with them.
+ */
+ if (g_pProcHashTable)
+ {
+ eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries);
+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ {
+ /*
+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+ * the hash table gets deleted as a result of deleting the final PMR entry,
+ * so this is not a real error condition...
+ */
+ eResult = PVRSRV_OK;
+ }
+ }
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpListKM
+
+ @Description
+ Dumps out the contents of the RI List entry for the
+ specified PMR, and all MEMDESC allocation entries
+ in the associated sub linked list.
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @input hPMR - PMR for which RI entry details are to be output
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *hPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpList(hPMR,0);
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIGetListEntryKM
+
+ @Description
+ Returns a pointer to a formatted string with details of the specified
+ list entry. If no entry exists (e.g. it may have been deleted
+ since the previous call), NULL is returned via ppszEntryString.
+
+ @input pid - pid for which RI entry details are to be output
+ @input ppHandle - pointer to the entry handle; if *ppHandle is NULL,
+ the first entry for the pid is returned
+ @output ppszEntryString - string to be output for the entry
+ @output ppHandle - on return, points to the next entry
+ (or NULL if there is no next entry)
+
+ @Return IMG_BOOL - IMG_TRUE if an entry or summary string was produced,
+ IMG_FALSE when iteration is complete
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+ IMG_HANDLE **ppHandle,
+ IMG_CHAR **ppszEntryString)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ uintptr_t hashData = 0;
+ IMG_PID hashKey = pid;
+
+ static IMG_CHAR ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN+1];
+ static IMG_UINT64 ui64TotalAlloc = 0;
+ static IMG_UINT64 ui64TotalBacked = 0;
+ static IMG_UINT64 ui64TotalImport = 0;
+ static IMG_UINT64 ui64TotalUnpinned = 0;
+ static IMG_BOOL bDisplaySummary = IMG_FALSE;
+ static IMG_BOOL bTerminateNextCall = IMG_FALSE;
+
+ if (bDisplaySummary)
+ {
+ OSSNPrintf((IMG_CHAR *)&ai8DebugfsSummaryString[0],
+ RI_MAX_DEBUGFS_ENTRY_LEN + 1,
+ "Alloc:0x%llx + Imports:0x%llx = Total:0x%llx [Physical: 0x%llx] {Unpinned:0x%llx}\n",
+ (unsigned long long) ui64TotalAlloc,
+ (unsigned long long) ui64TotalImport,
+ (unsigned long long) (ui64TotalAlloc + ui64TotalImport),
+ (unsigned long long) ui64TotalBacked,
+ (unsigned long long) ui64TotalUnpinned);
+
+ *ppszEntryString = &ai8DebugfsSummaryString[0];
+ ui64TotalAlloc = 0;
+ ui64TotalImport = 0;
+ ui64TotalUnpinned = 0;
+ ui64TotalBacked = 0;
+ bTerminateNextCall = IMG_TRUE;
+ bDisplaySummary = IMG_FALSE;
+ return IMG_TRUE;
+ }
+
+ if (bTerminateNextCall)
+ {
+ *ppszEntryString = NULL;
+ *ppHandle = NULL;
+ bTerminateNextCall = IMG_FALSE;
+ return IMG_FALSE;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ /* look-up pid in Hash Table, to obtain first entry for pid */
+ hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
+ if (hashData)
+ {
+ if (*ppHandle)
+ {
+ psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ psRISubEntry = NULL;
+ }
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ psRISubEntry = NULL;
+ }
+ }
+ }
+
+ if (psRISubEntry)
+ {
+ PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+ if (psNextProcListNode == NULL ||
+ psNextProcListNode == (PDLLIST_NODE)hashData)
+ {
+ bDisplaySummary = IMG_TRUE;
+ }
+
+
+ ui64TotalBacked += psRISubEntry->ui64BackedSize;
+
+ if (psRISubEntry->bIsImport)
+ {
+ /* If it is a local import we set backed size to 0
+ * so we don't account twice for the same allocation */
+ ui64TotalImport += psRISubEntry->ui64BackedSize;
+ }
+ else
+ {
+ ui64TotalAlloc += psRISubEntry->ui64Size;
+ }
+
+
+ if (!psRISubEntry->bIsPinned)
+ {
+ ui64TotalUnpinned += psRISubEntry->ui64Size;
+ }
+
+ _GenerateMEMDESCEntryString(psRISubEntry,
+ IMG_TRUE,
+ RI_MAX_DEBUGFS_ENTRY_LEN,
+ (IMG_CHAR *)&ai8DebugfsSummaryString);
+ ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN] = '\0';
+
+ *ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+ *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+ }
+ else
+ {
+ bDisplaySummary = IMG_TRUE;
+ if (ui64TotalAlloc == 0)
+ {
+ ai8DebugfsSummaryString[0] = '\0';
+ *ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+ }
+ }
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ return IMG_TRUE;
+}
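+
+/*
+ * Illustrative sketch only (not part of the driver): a debugfs-style
+ * consumer is expected to iterate a process's entries by calling
+ * RIGetListEntryKM() repeatedly, starting with a NULL handle, until it
+ * returns IMG_FALSE. The names phIter and pszLine are hypothetical.
+ *
+ *     IMG_HANDLE *phIter = NULL;
+ *     IMG_CHAR *pszLine = NULL;
+ *
+ *     while (RIGetListEntryKM(pid, &phIter, &pszLine))
+ *     {
+ *         if (pszLine && pszLine[0] != '\0')
+ *         {
+ *             ... emit pszLine (e.g. to the debugfs sequence file) ...
+ *         }
+ *     }
+ */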
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+ IMG_BOOL bDebugFs,
+ IMG_UINT16 ui16MaxStrLen,
+ IMG_CHAR *pszEntryString)
+{
+ IMG_CHAR szProc[RI_PROC_TAG_CHAR_LEN];
+ IMG_CHAR szImport[RI_IMPORT_TAG_CHAR_LEN];
+ IMG_PCHAR pszAnnotationText = NULL;
+
+ if (!bDebugFs)
+ {
+ /* we don't include process ID info for debugfs output */
+ OSSNPrintf( (IMG_CHAR *)&szProc,
+ RI_PROC_TAG_CHAR_LEN,
+ "[%d: %s]",
+ psRISubEntry->pid,
+ (IMG_CHAR *)psRISubEntry->ai8ProcName);
+ }
+ if (psRISubEntry->bIsImport)
+ {
+ OSSNPrintf( (IMG_CHAR *)&szImport,
+ RI_IMPORT_TAG_CHAR_LEN,
+ "{Import from PID %d}",
+ psRISubEntry->psRI->pid);
+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+ pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+ }
+ else
+ {
+ if (psRISubEntry->bIsExportable)
+ {
+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+ pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+ }
+ else
+ {
+ /* Set pszAnnotationText to that of the MEMDESC RI entry */
+ pszAnnotationText = (IMG_PCHAR)psRISubEntry->ai8TextB;
+ }
+ }
+
+
+ OSSNPrintf(pszEntryString,
+ ui16MaxStrLen,
+ "%s 0x%010llx\t%-80s %s\t0x%010llx [0x%010llx] %s%s%c",
+ (bDebugFs ? "" : " "),
+ (unsigned long long) (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+ pszAnnotationText,
+ (bDebugFs ? "" : (char *)szProc),
+ (unsigned long long) psRISubEntry->ui64Size,
+ (unsigned long long) psRISubEntry->ui64BackedSize,
+ (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+ (psRISubEntry->bIsPinned ? "" : "{Unpinned}"),
+ (bDebugFs ? '\n' : ' '));
+}
+
+/*!
+******************************************************************************
+
+ @Function _DumpList
+ @Description
+ Dumps out RI List entries according to parameters passed.
+
+ @input hPMR - If not NULL, function will output the RI entries for
+ the specified PMR only
+ @input pid - If non-zero, the function will only output MEMDESC RI
+ entries made by the process with ID pid.
+ If zero, all MEMDESC RI entries will be output.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *hPMR, IMG_PID pid)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ IMG_UINT16 ui16SubEntriesParsed = 0;
+ uintptr_t hashData = 0;
+ IMG_PID hashKey;
+ PMR *pPMRHashKey = hPMR;
+ IMG_BOOL bDisplayedThisPMR = IMG_FALSE;
+
+
+ if (!hPMR)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (g_pRIHashTable && g_pProcHashTable)
+ {
+ if (pid != 0)
+ {
+ /* look-up pid in Hash Table */
+ hashKey = pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+ if (hashData)
+ {
+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ }
+ }
+ else
+ {
+ /* look-up hPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ }
+ if (!psRIEntry)
+ {
+ /* No entry found in hash table */
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+ while (psRIEntry)
+ {
+ bDisplayedThisPMR = IMG_FALSE;
+ /* Output details for RI entry */
+ if (!pid)
+ {
+ _RIOutput (("%s (0x%p) suballocs:%d size:0x%llx",
+ psRIEntry->ai8TextA,
+ psRIEntry->hPMR,
+ (IMG_UINT)psRIEntry->ui16SubListCount,
+ (unsigned long long)psRIEntry->ui64LogicalSize));
+ bDisplayedThisPMR = IMG_TRUE;
+ }
+ ui16SubEntriesParsed = 0;
+ if(psRIEntry->ui16SubListCount)
+ {
+#if _DUMP_LINKEDLIST_INFO
+ _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%x}",
+ (IMG_UINT)psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+ if (!pid)
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ /* Traverse RI sublist and output details for each entry */
+ while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+ {
+ if (!bDisplayedThisPMR)
+ {
+ _RIOutput (("%s (0x%p) suballocs:%d size:0x%llx",
+ psRIEntry->ai8TextA,
+ psRIEntry->hPMR,
+ (IMG_UINT)psRIEntry->ui16SubListCount,
+ (unsigned long long)psRIEntry->ui64LogicalSize));
+ bDisplayedThisPMR = IMG_TRUE;
+ }
+#if _DUMP_LINKEDLIST_INFO
+ _RIOutput (("RI LIST: [this subentry:0x%x]",(IMG_UINT)psRISubEntry));
+ _RIOutput (("RI LIST: psRI:0x%x",(IMG_UINT32)psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+ {
+ IMG_CHAR szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN];
+
+ _GenerateMEMDESCEntryString(psRISubEntry,
+ IMG_FALSE,
+ RI_MAX_MEMDESC_RI_ENTRY_LEN,
+ (IMG_CHAR *)&szEntryString);
+ szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN-1] = '\0';
+ _RIOutput (("%s",(IMG_CHAR *)&szEntryString));
+ }
+
+ if (pid)
+ {
+ if((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == 0) ||
+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+ {
+ psRISubEntry = NULL;
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+ RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ if (psRIEntry != psRISubEntry->psRI)
+ {
+ /*
+ * The next MEMDESC in the process linked list is in a different PMR
+ */
+ psRIEntry = psRISubEntry->psRI;
+ bDisplayedThisPMR = IMG_FALSE;
+ }
+ }
+ }
+ }
+ else
+ {
+ ui16SubEntriesParsed++;
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ }
+ }
+ if (!pid)
+ {
+ if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+ {
+ /*
+ * Output error message as sublist does not contain the
+ * number of entries indicated by sublist count
+ */
+ _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries",
+ ui16SubEntriesParsed,psRIEntry->ui16SubListCount));
+ }
+ else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+ {
+ /*
+ * Output error message as sublist is empty but sublist count
+ * is not zero
+ */
+ _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist",
+ psRIEntry->ui16SubListCount));
+ }
+ }
+ psRIEntry = NULL;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpAllKM
+
+ @Description
+ Dumps out the contents of all RI List entries (i.e. for all
+ MEMDESC allocations for each PMR).
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(void)
+{
+ if (g_pRIHashTable)
+ {
+ return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries);
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpProcessKM
+
+ @Description
+ Dumps out the contents of all MEMDESC RI List entries (for every
+ PMR) which have been allocated by the specified process only.
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 dummyPMR;
+
+ if (g_pProcHashTable)
+ {
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpList((PMR *)&dummyPMR,pid);
+
+ /* Release RI lock*/
+ _RIUnlock();
+ }
+ return eError;
+}
+
+#if defined(DEBUG)
+/*!
+******************************************************************************
+
+ @Function _DumpProcessList
+ @Description
+ Searches the MEMDESC RI entries of the given PMR for an allocation
+ made by the specified process which covers the given offset, and
+ returns its device virtual address.
+
+ @input hPMR - PMR whose RI sublist entries are to be searched
+ @input pid - Process ID to match against the sublist entries
+ @input ui64Offset - Offset within the PMR to look up
+ @output psDevVAddr - Device virtual address of the matching entry
+ (0 if no match is found)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpProcessList(PMR *hPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ IMG_UINT16 ui16SubEntriesParsed = 0;
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = hPMR;
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ psDevVAddr->uiAddr = 0;
+
+ if (!hPMR)
+ {
+ /* NULL handle provided */
+ return eError;
+ }
+
+ if (g_pRIHashTable && g_pProcHashTable)
+ {
+ PVR_ASSERT(hPMR && pid);
+
+ /* look-up hPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+
+ if (!psRIEntry)
+ {
+ /* No entry found in hash table */
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+
+ if (psRIEntry->ui16SubListCount)
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+ RI_SUBLIST_ENTRY, sListNode);
+
+ /* Traverse RI sublist and output details for each entry */
+ while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+ {
+ if (pid == psRISubEntry->pid)
+ {
+ IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
+ IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
+
+ if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
+ {
+ psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
+ return PVRSRV_OK;
+ }
+ }
+
+ ui16SubEntriesParsed++;
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ }
+ }
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpProcessListKM
+
+ @Description
+ Dumps out selected contents of all MEMDESC RI List entries (for a
+ PMR) which have been allocated by the specified process only.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessListKM(PMR *hPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (g_pProcHashTable)
+ {
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpProcessList(hPMR,
+ pid,
+ ui64Offset,
+ psDevVAddr);
+
+ /* Release RI lock*/
+ _RIUnlock();
+ }
+
+ return eError;
+}
+#endif
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v)
+{
+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ return RIDumpListKM(psRIEntry->hPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v)
+{
+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+ RI_SUBLIST_ENTRY *psRISubEntry;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+ }
+ if (eResult == PVRSRV_OK)
+ {
+ eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+ /*
+ * If we've deleted the Hash table, return
+ * an error to stop the iterator...
+ */
+ if (!g_pRIHashTable)
+ {
+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+ }
+ return eResult;
+}
+
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry);
+ if (eResult == PVRSRV_OK && !g_pProcHashTable)
+ {
+ /*
+ * If we've deleted the Hash table, return
+ * an error to stop the iterator...
+ */
+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ return eResult;
+}
+
+#endif /* if defined(PVR_RI_DEBUG) */
--- /dev/null
+/*************************************************************************/ /*!
+@File ri_server.h
+@Title Resource Information abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Resource Information (RI) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RI_SERVER_H_
+#define _RI_SERVER_H_
+
+#include <img_defs.h>
+#include <ri_typedefs.h>
+#include <pmr.h>
+#include <pvrsrv_error.h>
+
+PVRSRV_ERROR RIInitKM(void);
+void RIDeInitKM(void);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *hPMR,
+ IMG_UINT32 ui32TextASize,
+ const IMG_CHAR ai8TextA[RI_MAX_TEXT_LEN+1],
+ IMG_UINT64 uiLogicalSize);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *hPMR,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR ai8TextB[RI_MAX_TEXT_LEN+1],
+ IMG_UINT64 uiOffset,
+ IMG_UINT64 uiSize,
+ IMG_UINT64 uiBackedSize,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsExportable,
+ RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64BackedSize,
+ IMG_UINT64 ui64DevVAddr,
+ RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIUpdateMEMDESCPinningKM(RI_HANDLE hRIHandle,
+ IMG_BOOL bIsPinned);
+
+PVRSRV_ERROR RIUpdateMEMDESCBackingKM(RI_HANDLE hRIHandle,
+ IMG_INT32 iSizeAdjustment);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(void);
+
+PVRSRV_ERROR RIDumpListKM(PMR *hPMR);
+
+PVRSRV_ERROR RIDumpAllKM(void);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+#if defined(DEBUG)
+PVRSRV_ERROR RIDumpProcessListKM(PMR *hPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr);
+#endif
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+ IMG_HANDLE **ppHandle,
+ IMG_CHAR **ppszEntryString);
+
+#endif /* #ifndef _RI_SERVER_H_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Resource Information (RI) Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of RI management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+#define RI_MAX_TEXT_LEN 96
+
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
+
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+pvrsrvkm-y += \
+ services/system/$(PVR_SYSTEM)/rk_init.o \
+ services/system/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/env/linux/interrupt_support.o
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RK Initialisation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Initialisation routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_ION)
+#include "ion_sys.h"
+#endif /* defined(SUPPORT_ION) */
+#include "rk_init.h"
+
+#include <linux/hardirq.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/rk_fb.h>
+#include <linux/regulator/driver.h>
+#include <linux/rockchip/dvfs.h>
+#include <linux/rockchip/common.h>
+#include <linux/workqueue.h>
+#include <linux/clkdev.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/sched/rt.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+#include <linux/clk-private.h>
+#else
+#include <linux/clk-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
+#endif
+#include "power.h"
+#include "rgxinit.h"
+#include <asm/compiler.h>
+
+static struct rk_context *g_platform = NULL;
+
+#if RK_TF_VERSION
+#define PSCI_RKSIP_TF_VERSION (0x82000001)
+
+static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
+ u64 arg2)
+{
+ asm volatile(
+ __asmeq("%0", "x0")
+ __asmeq("%1", "x1")
+ __asmeq("%2", "x2")
+ __asmeq("%3", "x3")
+ "smc #0\n"
+ : "+r" (function_id)
+ : "r" (arg0), "r" (arg1), "r" (arg2));
+
+ return function_id;
+}
+
+static int (*invoke_psci_fn)(u64, u64, u64, u64) = __invoke_psci_fn_smc;
+
+static int rk_tf_get_version(void)
+{
+ int ver_num;
+
+ ver_num = invoke_psci_fn(PSCI_RKSIP_TF_VERSION, 0, 0, 0);
+
+ return ver_num;
+}
+
+static int rk_tf_check_version(void)
+{
+ int version = 0;
+ int high_16 = 0;
+ int low_16 = 0;
+ IMG_PINT pNULL = NULL;
+
+ version = rk_tf_get_version();
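+ /* Split the version word into its major (upper 16 bits) and minor
+ * (lower 16 bits) fields */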
+ high_16 = (version >> 16) & ~(0xFFFF << 16);
+ low_16 = (version & ~(0xFFFF << 16));
+
+ printk("raw version=0x%x,rk_tf_version=%x.%x\n", version, high_16, low_16);
+
+ if ((version != 0xFFFFFFFF) && (high_16 >= 1) && (low_16 >= 3)) {
+ return 0;
+ } else {
+ printk("Error:%s-line:%d This version can't support rk3328\n", __func__, __LINE__);
+ *pNULL = 0; /*crash system*/
+ return -1;
+ }
+}
+
+#endif
+
+#if RK33_DVFS_SUPPORT
+#define gpu_temp_limit 110
+#define gpu_temp_statis_time 1
+#define level0_threshold_min 0
+#define level0_threshold_max 40
+#define levelf_threshold_max 100
+#define level0_coef_max 95
+
+static IMG_UINT32 div_dvfs = 0;
+
+/*dvfs status*/
+static struct workqueue_struct *rgx_dvfs_wq;
+spinlock_t rgx_dvfs_spinlock;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+static struct cpufreq_frequency_table *rgx_freq_table = NULL;
+#endif
+#endif
+static IMG_HANDLE ghGpuUtilDvfs = NULL;
+
+/* voltage clock min_threshold max_threshold time coef */
+static rgx_dvfs_info rgx_dvfs_infotbl[] = {
+ {925, 100, 0, 70, 0, 100},
+ {925, 160, 50, 65, 0, 95},
+ {1025, 266, 60, 78, 0, 90},
+ {1075, 350, 65, 75, 0, 85},
+ {1125, 400, 70, 75, 0, 80},
+ {1200, 500, 90, 100, 0, 75},
+};
+rgx_dvfs_info *p_rgx_dvfs_infotbl = rgx_dvfs_infotbl;
+unsigned int RGX_DVFS_STEP = ARRAY_SIZE(rgx_dvfs_infotbl);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+static int rk33_clk_set_normal_node(struct clk* node, unsigned long rate)
+{
+ int ret = 0;
+
+ if (!node) {
+ printk("rk33_clk_set_normal_node error \r\n");
+ return -1;
+ }
+ ret = clk_set_rate(node, rate * ONE_MHZ);
+ if (ret)
+ printk("clk_set_rate error \r\n");
+
+ return ret;
+}
+
+static int rk33_clk_set_dvfs_node(struct dvfs_node *node, unsigned long rate)
+{
+ int ret = 0;
+
+ if (!node) {
+ printk("rk33_clk_set_dvfs_node error \r\n");
+ return -1;
+ }
+ ret = dvfs_clk_set_rate(node, rate * ONE_MHZ);
+ if (ret)
+ printk("dvfs_clk_set_rate error \r\n");
+
+ return ret;
+}
+#endif
+#if RK33_DVFS_SUPPORT
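+/* fix_float(a) evaluates to (a * dividend) / 10 rounded up, i.e. roughly 70%
+ * of 'a' when dividend is 7; it is used below to raise the computed
+ * min_threshold part-way towards the level's max_threshold. */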
+#define dividend 7
+#define fix_float(a) ((((a)*dividend)%10)?((((a)*dividend)/10)+1):(((a)*dividend)/10))
+static IMG_BOOL calculate_dvfs_max_min_threshold(IMG_UINT32 level)
+{
+ IMG_UINT32 pre_level;
+ IMG_UINT32 tmp;
+
+ if (level == 0) {
+ if ((RGX_DVFS_STEP - 1) == level) {
+ rgx_dvfs_infotbl[level].min_threshold = level0_threshold_min;
+ rgx_dvfs_infotbl[level].max_threshold = levelf_threshold_max;
+ } else {
+ rgx_dvfs_infotbl[level].min_threshold = level0_threshold_min;
+ rgx_dvfs_infotbl[level].max_threshold = level0_threshold_max;
+ }
+#if RK33_USE_CL_COUNT_UTILS
+ rgx_dvfs_infotbl[level].coef = level0_coef_max;
+#endif
+ } else {
+ pre_level = level - 1;
+ if ((RGX_DVFS_STEP - 1) == level) {
+ rgx_dvfs_infotbl[level].max_threshold = levelf_threshold_max;
+ } else {
+ rgx_dvfs_infotbl[level].max_threshold = rgx_dvfs_infotbl[pre_level].max_threshold + div_dvfs;
+ }
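+ /* Scale the previous level's max_threshold by the clock ratio so that
+ * this level's min_threshold corresponds to roughly the same absolute
+ * GPU workload */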
+ rgx_dvfs_infotbl[level].min_threshold = (rgx_dvfs_infotbl[pre_level].max_threshold * (rgx_dvfs_infotbl[pre_level].clock))
+ / (rgx_dvfs_infotbl[level].clock);
+
+ tmp = rgx_dvfs_infotbl[level].max_threshold - rgx_dvfs_infotbl[level].min_threshold;
+
+ rgx_dvfs_infotbl[level].min_threshold += fix_float(tmp);
+#if RK33_USE_CL_COUNT_UTILS
+ rgx_dvfs_infotbl[level].coef = (rgx_dvfs_infotbl[pre_level].clock * rgx_dvfs_infotbl[pre_level].coef + 2000)
+ / (rgx_dvfs_infotbl[level].clock);
+#endif
+ }
+
+#if 1
+ printk("rgx_dvfs_infotbl[%d].clock=%d,min_threshold=%d,max_threshold=%d,coef=%d\n", level,
+ rgx_dvfs_infotbl[level].clock,
+ rgx_dvfs_infotbl[level].min_threshold,
+ rgx_dvfs_infotbl[level].max_threshold,
+ rgx_dvfs_infotbl[level].coef
+ );
+#endif
+ return IMG_TRUE;
+}
+
+#if RK33_DVFS_FREQ_LIMIT
+static int rk33_dvfs_get_freq(int level)
+{
+ if (WARN_ON((level >= RGX_DVFS_STEP) || (level < 0))) {
+ printk("unknown rgx dvfs level:level = %d,set clock not done\n", level);
+ return -1;
+ }
+ return rgx_dvfs_infotbl[level].clock;
+}
+#endif
+
+static int rk33_dvfs_get_level(int freq)
+{
+ int i;
+ for (i = 0; i < RGX_DVFS_STEP; i++) {
+ if (rgx_dvfs_infotbl[i].clock == freq)
+ return i;
+ }
+ return -1;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+static void rk33_dvfs_set_clock(struct rk_context *platform, int freq)
+{
+ unsigned long old_freq;
+#if USE_PVR_SPEED_CHANGE
+ PVRSRV_ERROR err;
+#endif
+
+ /* Validate the clock handles before notifying the services driver of a
+ * speed change, so that an early return cannot leave the pre/post
+ * clock-speed-change calls unbalanced. */
+ if (!platform->aclk_gpu_mem || !platform->aclk_gpu_cfg || !platform->dvfs_enabled) {
+ printk("aclk_gpu_mem or aclk_gpu_cfg not init\n");
+ return;
+ }
+
+ if (!platform->gpu_clk_node && !platform->clk_gpu) {
+ pr_err("%s:clk_gpu & gpu_clk_node is null\n", __func__);
+ return;
+ }
+
+#if USE_PVR_SPEED_CHANGE
+ err = PVRSRVDevicePreClockSpeedChange(platform->dev_config->psDevNode, IMG_TRUE, NULL);
+ if (err != PVRSRV_OK) {
+ return;
+ }
+#endif
+
+ if (platform->gpu_clk_node)
+ old_freq = clk_get_rate(platform->gpu_clk_node->clk);
+ else if (platform->clk_gpu)
+ old_freq = clk_get_rate(platform->clk_gpu);
+
+ if (old_freq > freq) {
+ if (platform->gpu_clk_node)
+ rk33_clk_set_dvfs_node(platform->gpu_clk_node, freq);
+ else if (platform->clk_gpu)
+ rk33_clk_set_normal_node(platform->clk_gpu, freq);
+ }
+
+ rk33_clk_set_normal_node(platform->aclk_gpu_mem, freq);
+ rk33_clk_set_normal_node(platform->aclk_gpu_cfg, freq);
+
+ if (old_freq < freq) {
+ if (platform->gpu_clk_node)
+ rk33_clk_set_dvfs_node(platform->gpu_clk_node, freq);
+ else if (platform->clk_gpu)
+ rk33_clk_set_normal_node(platform->clk_gpu, freq);
+ }
+
+#if USE_PVR_SPEED_CHANGE
+ PVRSRVDevicePostClockSpeedChange(platform->dev_config->psDevNode, IMG_TRUE, NULL);
+#endif
+}
+
+static void rk33_dvfs_set_level(struct rk_context *platform, int level)
+{
+ static int prev_level = -1;
+
+ if (level == prev_level)
+ return;
+
+ if (WARN_ON((level >= RGX_DVFS_STEP) || (level < 0))) {
+ printk("unknown rgx dvfs level:level = %d,set clock not done\n", level);
+ return ;
+ }
+
+ rk33_dvfs_set_clock(platform, rgx_dvfs_infotbl[level].clock);
+
+ prev_level = level;
+}
+#else
+static void rk33_dvfs_set_level(struct rk_context *platform, int level)
+{
+ static int prev_level = -1;
+ int ret = 0;
+ unsigned int old_freq, new_freq;
+ unsigned int old_volt, new_volt;
+
+ if (NULL == platform)
+ panic("oops");
+
+ if (!platform->dvfs_enabled) {
+ printk("dvfs not enabled\n");
+ return;
+ }
+
+ if (level == prev_level)
+ return;
+
+ if (WARN_ON((level >= RGX_DVFS_STEP) || (level < 0))) {
+ printk("unknown rgx dvfs level:level = %d,set clock not done \n", level);
+ return;
+ }
+
+ old_freq = clk_get_rate(platform->sclk_gpu_core) / ONE_MHZ;
+ new_freq = rgx_dvfs_infotbl[level].clock;
+ old_volt = regulator_get_voltage(platform->gpu_reg);
+ new_volt = rgx_dvfs_infotbl[level].voltage;
+
+#if 0
+ dev_info(&gpsPVRLDMDev->dev, "%d MHz, %d mV --> %d MHz, %d mV\n",
+ old_freq, (old_volt > 0) ? old_volt / 1000 : -1,
+ new_freq, new_volt ? new_volt / 1000 : -1);
+#endif
+
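+ /* Raise the voltage before increasing the clocks, and only lower it after
+ * the clocks have been reduced, so the GPU is never clocked faster than
+ * the current supply voltage supports */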
+ if (new_freq > old_freq) {
+ ret = regulator_set_voltage(platform->gpu_reg, new_volt, INT_MAX);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to scale voltage up: %d\n", ret));
+ return;
+ }
+ }
+ ret = clk_set_rate(platform->aclk_gpu_mem, new_freq * ONE_MHZ);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set aclk_gpu_mem rate: %d\n", ret));
+ if (old_volt > 0)
+ regulator_set_voltage(platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+ ret = clk_set_rate(platform->aclk_gpu_cfg, new_freq * ONE_MHZ);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set aclk_gpu_cfg rate: %d\n", ret));
+ clk_set_rate(platform->aclk_gpu_mem, old_freq * ONE_MHZ);
+ if (old_volt > 0)
+ regulator_set_voltage(platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+ ret = clk_set_rate(platform->sclk_gpu_core, new_freq * ONE_MHZ);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set sclk_gpu_core rate: %d\n", ret));
+ clk_set_rate(platform->aclk_gpu_mem, old_freq * ONE_MHZ);
+ clk_set_rate(platform->aclk_gpu_cfg, old_freq * ONE_MHZ);
+ if (old_volt > 0)
+ regulator_set_voltage(platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+ if (new_freq < old_freq) {
+ ret =
+ regulator_set_voltage(platform->gpu_reg, new_volt, INT_MAX);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to scale voltage down: %d\n", ret));
+ clk_set_rate(platform->aclk_gpu_mem, old_freq * ONE_MHZ);
+ clk_set_rate(platform->aclk_gpu_cfg, old_freq * ONE_MHZ);
+ clk_set_rate(platform->sclk_gpu_core, old_freq * ONE_MHZ);
+ return;
+ }
+ }
+#if 0
+ update_time_in_state(prev_level);
+#endif
+ prev_level = level;
+}
+#endif
+
+static int rk33_dvfs_get_enable_status(struct rk_context *platform)
+{
+ unsigned long flags;
+ int enable;
+
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ enable = platform->timer_active;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+
+ return enable;
+}
+
+#if RK33_USE_CL_COUNT_UTILS
+static void rk33_dvfs_event_proc(struct work_struct *w)
+{
+ unsigned long flags;
+ static IMG_UINT32 temp_tmp;
+ IMG_UINT32 fps = 0;
+ IMG_UINT32 fps_limit;
+ IMG_UINT32 policy;
+ IMG_INT32 absload;
+ IMG_INT32 new_index;
+ struct rk_context *platform;
+
+ platform = container_of(w, struct rk_context, rgx_dvfs_work);
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+
+ if (!rk33_dvfs_get_enable_status(platform)) {
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+ return;
+ }
+
+ fps = rk_get_real_fps(0);
+
+ platform->temperature_time++;
+ /*temp_tmp += rockchip_tsadc_get_temp(2);*/
+ if (platform->temperature_time >= gpu_temp_statis_time) {
+ platform->temperature_time = 0;
+ platform->temperature = temp_tmp / gpu_temp_statis_time;
+ temp_tmp = 0;
+ /*pr_info("platform->temperature = %d\n",platform->temperature);*/
+ }
+
+ platform->abs_load[0] = platform->abs_load[1];
+ platform->abs_load[1] = platform->abs_load[2];
+ platform->abs_load[2] = platform->abs_load[3];
+ platform->abs_load[3] = (platform->utilisation * rgx_dvfs_infotbl[platform->freq_level].clock * rgx_dvfs_infotbl[platform->freq_level].coef) / 100;
+ absload = (platform->abs_load[3] * 4 + platform->abs_load[2] * 3 + platform->abs_load[1] * 2 + platform->abs_load[0]);
+
+ /*policy = rockchip_pm_get_policy();*/
+ policy = ROCKCHIP_PM_POLICY_NORMAL;
+
+ if (ROCKCHIP_PM_POLICY_PERFORMANCE == policy) {
+ platform->freq_level = RGX_DVFS_STEP - 1; /*Highest level when performance mode*/
+ } else if (platform->fix_freq > 0) {
+ platform->freq_level = rk33_dvfs_get_level(platform->fix_freq);
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("fix clock=%d\n", platform->fix_freq);
+ } else {
+ fps_limit = (ROCKCHIP_PM_POLICY_NORMAL == policy) ? LIMIT_FPS : LIMIT_FPS_POWER_SAVE;
+ /*printk("policy : %d , fps_limit = %d\n",policy,fps_limit);*/
+ /*give priority to temperature unless in performance mode */
+ if (platform->temperature > gpu_temp_limit) {
+ if (platform->freq_level > 0)
+ platform->freq_level--;
+
+ if (gpu_temp_statis_time > 1)
+ platform->temperature = 0;
+ } else if (absload == 0 || platform->gpu_active == IMG_FALSE) {
+ platform->freq_level = 0;
+ } else if ((platform->freq_level < RGX_DVFS_STEP - 1) && fps < fps_limit) {
+ /*freq_hint=0 or freq_hint>sRK30_DVFS.u8FreqNums, select freq automatically, find the right index*/
+ for (new_index = 0; new_index < RGX_DVFS_STEP; new_index++) {
+ if (absload <= ((rgx_dvfs_infotbl[new_index].clock) * (rgx_dvfs_infotbl[new_index].coef) * 9)) {
+ if (platform->debug_level == DBG_HIGH)
+ printk("absload=%d,cur_coef[%d]=%d\n", absload, new_index, (rgx_dvfs_infotbl[new_index].clock) * (rgx_dvfs_infotbl[new_index].coef)*9);
+ break;
+ }
+ }
+
+ /*ensure the new_index in the reasonable range*/
+ if (new_index >= RGX_DVFS_STEP)
+ new_index = RGX_DVFS_STEP - 1;
+
+ /*if fps>=50, should not run at the higher frequency*/
+ if (new_index > platform->freq_level && fps >= fps_limit) {
+ new_index = platform->freq_level;
+ } else if (platform->freq_level == RGX_DVFS_STEP - 1 && fps > 53 && absload <= ((rgx_dvfs_infotbl[new_index].clock) * (rgx_dvfs_infotbl[new_index].coef) * 9)) {
+ /*if running at highest frequency & fps>53 & absload<90%, try to run at a lower frequency*/
+ new_index = platform->freq_level - 1;
+ }
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("absload=%d,freq_level=%d,freq=%dM\n", absload, new_index, rgx_dvfs_infotbl[new_index].clock);
+
+ platform->freq_level = new_index;
+ }
+ }
+#if RK33_SYSFS_FILE_SUPPORT && RK33_DVFS_FREQ_LIMIT
+ if ((platform->up_level >= 0) && (platform->freq_level > platform->up_level))
+ platform->freq_level = platform->up_level;
+
+ if ((platform->down_level >= 0) && (platform->freq_level < platform->down_level))
+ platform->freq_level = platform->down_level;
+#endif
+ platform->time_busy = 0;
+ platform->time_idle = 0;
+ platform->utilisation = 0;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ rk33_dvfs_set_level(platform, platform->freq_level);
+}
+#else
+static void rk33_dvfs_event_proc(struct work_struct *w)
+{
+ unsigned long flags;
+ static int level_down_time;
+ static int level_up_time;
+ static IMG_UINT32 temp_tmp;
+ IMG_UINT32 fps = 0;
+ IMG_UINT32 fps_limit;
+ IMG_UINT32 policy;
+ struct rk_context *platform;
+
+ platform = container_of(w, struct rk_context, rgx_dvfs_work);
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+
+ if (!rk33_dvfs_get_enable_status(platform)) {
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+ return;
+ }
+
+ fps = rk_get_real_fps(0);
+
+ platform->temperature_time++;
+ /*temp_tmp += rockchip_tsadc_get_temp(2);*/
+ if (platform->temperature_time >= gpu_temp_statis_time) {
+ platform->temperature_time = 0;
+ platform->temperature = temp_tmp / gpu_temp_statis_time;
+ temp_tmp = 0;
+ /*pr_info("platform->temperature = %d\n",platform->temperature);*/
+ }
+
+ /*
+ policy = rockchip_pm_get_policy();
+ */
+ policy = ROCKCHIP_PM_POLICY_NORMAL;
+
+ if (policy == ROCKCHIP_PM_POLICY_PERFORMANCE) {
+ platform->freq_level = RGX_DVFS_STEP - 1; /*Highest level when performance mode*/
+ } else if (platform->fix_freq > 0) {
+ platform->freq_level = rk33_dvfs_get_level(platform->fix_freq);
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("fix clock=%d\n", platform->fix_freq);
+ } else {
+ fps_limit = (policy == ROCKCHIP_PM_POLICY_NORMAL) ? LIMIT_FPS : LIMIT_FPS_POWER_SAVE;
+#if 0
+ printk("policy : %d , fps_limit = %d\n", policy, fps_limit);
+#endif
+
+ /*give priority to temperature unless in performance mode */
+ if (platform->temperature > gpu_temp_limit) {
+ if (platform->freq_level > 0)
+ platform->freq_level--;
+
+ if (gpu_temp_statis_time > 1)
+ platform->temperature = 0;
+ } else if ((platform->utilisation > rgx_dvfs_infotbl[platform->freq_level].max_threshold) && (platform->freq_level < RGX_DVFS_STEP - 1) /*&& fps < fps_limit*/) {
+ level_up_time++;
+ if (level_up_time == RGX_DVFS_LEVEL_INTERVAL) {
+ if (platform->debug_level == DBG_HIGH)
+ printk("up,utilisation=%d,current clock=%d,fps = %d\n", platform->utilisation, rgx_dvfs_infotbl[platform->freq_level].clock, fps);
+
+ platform->freq_level++;
+ level_up_time = 0;
+
+ if (platform->debug_level == DBG_HIGH)
+ printk(" next clock=%d\n", rgx_dvfs_infotbl[platform->freq_level].clock);
+
+ BUG_ON(platform->freq_level >= RGX_DVFS_STEP);
+ }
+ level_down_time = 0;
+ } else if ((platform->freq_level > 0) && (platform->utilisation < rgx_dvfs_infotbl[platform->freq_level].min_threshold)) {
+ level_down_time++;
+ if (level_down_time == RGX_DVFS_LEVEL_INTERVAL) {
+ if (platform->debug_level == DBG_HIGH)
+ printk("down,utilisation=%d,current clock=%d,fps = %d\n", platform->utilisation, rgx_dvfs_infotbl[platform->freq_level].clock, fps);
+ BUG_ON(platform->freq_level <= 0);
+ platform->freq_level--;
+ level_down_time = 0;
+
+ if (platform->debug_level == DBG_HIGH)
+ printk(" next clock=%d\n", rgx_dvfs_infotbl[platform->freq_level].clock);
+ }
+ level_up_time = 0;
+ } else {
+ level_down_time = 0;
+ level_up_time = 0;
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("keep,utilisation=%d,current clock=%d,fps = %d\n", platform->utilisation, rgx_dvfs_infotbl[platform->freq_level].clock, fps);
+ }
+ }
+#if RK33_SYSFS_FILE_SUPPORT && RK33_DVFS_FREQ_LIMIT
+ if ((platform->up_level >= 0) && (platform->freq_level > platform->up_level))
+ platform->freq_level = platform->up_level;
+
+ if ((platform->down_level >= 0) && (platform->freq_level < platform->down_level))
+ platform->freq_level = platform->down_level;
+#endif
+ platform->time_busy = 0;
+ platform->time_idle = 0;
+ platform->utilisation = 0;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ rk33_dvfs_set_level(platform, platform->freq_level);
+}
+#endif
+
+static IMG_BOOL rk33_dvfs_event(RGXFWIF_GPU_UTIL_STATS *psUtilStats)
+{
+ struct rk_context *platform;
+#if !RK33_USE_CUSTOMER_GET_GPU_UTIL
+ IMG_UINT32 time_busy = 0;
+ IMG_UINT32 time_idle = 0;
+#endif
+ unsigned long flags;
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ ktime_t now = ktime_get();
+ ktime_t diff;
+ IMG_INT i;
+#endif
+
+ platform = container_of(psUtilStats, struct rk_context, sUtilStats);
+ PVR_ASSERT(platform != NULL);
+
+#if RK33_USE_RGX_GET_GPU_UTIL
+ PVR_ASSERT(psUtilStats != NULL);
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("GPU util info:valid[%d],ActiveHigh[%llu],ActiveLow[%llu],Blocked[%llu],Idle[%llu],Sum[%llu]\n",
+ psUtilStats->bValid, psUtilStats->ui64GpuStatActiveHigh,
+ psUtilStats->ui64GpuStatActiveLow,
+ psUtilStats->ui64GpuStatBlocked,
+ psUtilStats->ui64GpuStatIdle,
+ psUtilStats->ui64GpuStatCumulative);
+
+ if (psUtilStats->bValid /*&& psUtilStats->bIncompleteData */) {
+ time_busy = psUtilStats->ui64GpuStatActiveHigh + psUtilStats->ui64GpuStatActiveLow;
+ time_idle = psUtilStats->ui64GpuStatIdle + psUtilStats->ui64GpuStatBlocked;
+
+ /* Reject the sample if no time has been accounted, to avoid a division by zero. */
+ if (time_busy + time_idle == 0)
+ goto err;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ platform->time_busy += time_busy;
+ platform->time_idle += time_idle;
+ platform->utilisation = (time_busy * 100) / (time_busy + time_idle);
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+ queue_work_on(0, rgx_dvfs_wq, &platform->rgx_dvfs_work);
+ }
+#elif RK33_USE_CUSTOMER_GET_GPU_UTIL
+ diff = ktime_sub(now, platform->time_period_start);
+
+ if (platform->gpu_active) {
+ platform->time_busy += (IMG_UINT32) (ktime_to_ns(diff) >> RK_PM_TIME_SHIFT);
+ platform->time_period_start = now;
+ } else {
+ platform->time_idle += (IMG_UINT32) (ktime_to_ns(diff) >> RK_PM_TIME_SHIFT);
+ platform->time_period_start = now;
+ }
+
+ /* Reject the sample if no time has been accounted, to avoid a division by zero. */
+ if (platform->time_busy + platform->time_idle == 0)
+ goto err;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ for (i = 0; i < RK33_MAX_UTILIS - 1; i++) {
+ platform->stUtilis.time_busys[i] = platform->stUtilis.time_busys[i + 1];
+ platform->stUtilis.time_idles[i] = platform->stUtilis.time_idles[i + 1];
+ platform->stUtilis.utilis[i] = platform->stUtilis.utilis[i + 1];
+ }
+ platform->stUtilis.time_busys[RK33_MAX_UTILIS - 1] = platform->time_busy;
+ platform->stUtilis.time_idles[RK33_MAX_UTILIS - 1] = platform->time_idle;
+ platform->stUtilis.utilis[RK33_MAX_UTILIS - 1] = (platform->time_busy * 10) / (platform->time_busy + platform->time_idle);
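+ /*
+ * Each sample in utilis[] is scaled to 0..10, so weighting the last four
+ * samples by 4, 3, 2 and 1 (weights summing to 10) yields a smoothed
+ * utilisation in the range 0..100, biased towards the most recent samples.
+ */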
+ platform->utilisation =
+ platform->stUtilis.utilis[3] * 4 +
+ platform->stUtilis.utilis[2] * 3 +
+ platform->stUtilis.utilis[1] * 2 + platform->stUtilis.utilis[0] * 1;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ if (platform->debug_level == DBG_HIGH)
+ printk("GPU util info:time_busy=%d,time_idle=%d,utilisation=%d\n",
+ platform->time_busy, platform->time_idle,
+ platform->utilisation);
+ queue_work_on(0, rgx_dvfs_wq, &platform->rgx_dvfs_work);
+#endif
+
+ return IMG_TRUE;
+
+err:
+
+ platform->time_busy = 0;
+ platform->time_idle = 0;
+ platform->utilisation = 0;
+
+ return IMG_FALSE;
+}
+
+
+#if USE_HRTIMER
+static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+ struct rk_context *platform;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(timer != NULL);
+
+ platform = container_of(timer, struct rk_context, timer);
+ PVR_ASSERT(platform != NULL);
+
+ spin_lock_irqsave(&platform->timer_lock, flags);
+
+ if (platform->dev_config->psDevNode) {
+ psDevInfo = platform->dev_config->psDevNode->pvDevice;
+
+ if (psDevInfo && psDevInfo->pfnGetGpuUtilStats && platform->gpu_active) {
+ /*Measuring GPU Utilisation*/
+ eError = psDevInfo->pfnGetGpuUtilStats(platform->dev_config->psDevNode, ghGpuUtilDvfs, &platform->sUtilStats);
+ rk33_dvfs_event(&platform->sUtilStats);
+ } else {
+ if (!psDevInfo || !psDevInfo->pfnGetGpuUtilStats)
+ PVR_DPF((PVR_DBG_ERROR, "%s:line=%d,devinfo is null\n", __func__, __LINE__));
+ }
+ }
+
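+ /*
+ * The timer is re-armed manually under timer_lock instead of returning
+ * HRTIMER_RESTART, so that clearing timer_active (e.g. from
+ * rk33_dvfs_utils_term or rk33_clear_device_node) is enough to stop the
+ * periodic sampling.
+ */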
+ if (platform->timer_active)
+ hrtimer_start(timer, HR_TIMER_DELAY_MSEC(RK33_DVFS_FREQ), HRTIMER_MODE_REL);
+
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+#elif USE_KTHREAD
+static int gpu_dvfs_task(void *data)
+{
+ long timeout = msecs_to_jiffies(RK33_DVFS_FREQ);
+ unsigned long flags;
+ struct rk_context *platform = data;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(platform != NULL);
+
+ set_freezable();
+
+ do {
+ if (platform->dev_config->psDevNode) {
+ psDevInfo = platform->dev_config->psDevNode->pvDevice;
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ if (psDevInfo && psDevInfo->pfnGetGpuUtilStats && platform->gpu_active) {
+ /*Measuring GPU Utilisation*/
+ psDevInfo->pfnGetGpuUtilStats(platform->dev_config->psDevNode, ghGpuUtilDvfs, &platform->sUtilStats);
+ rk33_dvfs_event(&platform->sUtilStats);
+ } else {
+ if (!psDevInfo || !psDevInfo->pfnGetGpuUtilStats)
+ PVR_DPF((PVR_DBG_ERROR, "%s:line=%d,devinfo is null\n", __func__, __LINE__));
+ }
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+ }
+ wait_event_freezable_timeout(platform->dvfs_wait, kthread_should_stop(), timeout);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+#endif
+
+static void rk33_dvfs_utils_init(struct rk_context *platform)
+{
+#if USE_KTHREAD
+ int iRet = -1;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+#endif
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ IMG_INT i;
+#endif
+ PVR_ASSERT(platform != NULL);
+
+ /*spin_lock_irqsave(&rgx_dvfs_spinlock, flags);*/
+ platform->utilisation = 0;
+ platform->freq_level = 0;
+ platform->freq = 0;
+ platform->time_tick = 0;
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ platform->time_period_start = ktime_get();
+ for (i = 0; i < RK33_MAX_UTILIS; i++) {
+ platform->stUtilis.time_busys[i] = 0;
+ platform->stUtilis.time_idles[i] = 0;
+ platform->stUtilis.utilis[i] = 0;
+ }
+#endif
+ platform->gpu_active = IMG_FALSE;
+ platform->time_busy = 0;
+ platform->time_idle = 0;
+ platform->temperature = 0;
+ platform->temperature_time = 0;
+ platform->timer_active = IMG_FALSE;
+
+ INIT_WORK(&platform->rgx_dvfs_work, rk33_dvfs_event_proc);
+
+#if RK33_USE_CL_COUNT_UTILS
+ platform->abs_load[0] = platform->abs_load[1] = platform->abs_load[2] = platform->abs_load[3] = 0;
+#endif
+#if RK33_SYSFS_FILE_SUPPORT
+ platform->debug_level = DBG_OFF;
+ platform->fix_freq = 0;
+ platform->fps_gap = FPS_DEFAULT_GAP;
+#if RK33_DVFS_FREQ_LIMIT
+ platform->up_level = -1;
+ platform->down_level = -1;
+#endif //RK33_DVFS_FREQ_LIMIT
+#endif //RK33_SYSFS_FILE_SUPPORT
+
+
+#if USE_HRTIMER
+ hrtimer_init(&platform->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ platform->timer.function = dvfs_callback;
+#endif
+
+#if USE_KTHREAD
+ platform->dvfs_task = kthread_create(gpu_dvfs_task, platform, "GpuDvfsD");
+ if (IS_ERR(platform->dvfs_task)) {
+ iRet = PTR_ERR(platform->dvfs_task);
+ PVR_DPF((PVR_DBG_ERROR, "failed to create kthread! error %d\n", iRet));
+ return;
+ }
+
+ sched_setscheduler_nocheck(platform->dvfs_task, SCHED_FIFO, &param);
+ get_task_struct(platform->dvfs_task);
+ kthread_bind(platform->dvfs_task, 0);
+
+ init_waitqueue_head(&platform->dvfs_wait);
+
+#endif
+ //spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+}
+
+static void rk33_dvfs_utils_term(struct rk_context *platform)
+{
+ unsigned long flags;
+
+ PVR_ASSERT(platform != NULL);
+
+
+ if (platform->timer_active) {
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->timer_active = IMG_FALSE;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+#if USE_HRTIMER
+ hrtimer_cancel(&platform->timer);
+#elif USE_KTHREAD
+ kthread_stop(platform->dvfs_task);
+#endif
+ }
+}
+
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+/* Caller must hold platform->timer_lock before calling this function. */
+static void rk33_dvfs_record_busy_utils(struct rk_context *platform)
+{
+ ktime_t now;
+ ktime_t diff;
+ IMG_UINT32 ns_time;
+
+ PVR_ASSERT(platform != NULL);
+
+ now = ktime_get();
+ diff = ktime_sub(now, platform->time_period_start);
+
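+ /* Accumulate time in units of (1 << RK_PM_TIME_SHIFT) ns (i.e. 256 ns)
+ so the 32-bit busy/idle counters do not overflow between samples. */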
+ ns_time = (IMG_UINT32)(ktime_to_ns(diff) >> RK_PM_TIME_SHIFT);
+ platform->time_busy += ns_time;
+ platform->time_period_start = now;
+}
+
+static void rk33_dvfs_record_idle_utils(struct rk_context *platform)
+{
+ ktime_t now;
+ ktime_t diff;
+ IMG_UINT32 ns_time;
+
+ PVR_ASSERT(platform != NULL);
+
+ now = ktime_get();
+ diff = ktime_sub(now, platform->time_period_start);
+
+ ns_time = (IMG_UINT32)(ktime_to_ns(diff) >> RK_PM_TIME_SHIFT);
+ platform->time_idle += ns_time;
+ platform->time_period_start = now;
+}
+#endif
+
+static void rk33_dvfs_record_gpu_idle(struct rk_context *platform)
+{
+ unsigned long flags;
+ PVR_ASSERT(platform != NULL);
+
+ if (!platform->gpu_active)
+ return;
+
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->gpu_active = IMG_FALSE;
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ rk33_dvfs_record_busy_utils(platform);
+#endif
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+}
+
+
+static void rk33_dvfs_record_gpu_active(struct rk_context *platform)
+{
+ unsigned long flags;
+
+ PVR_ASSERT(platform != NULL);
+
+ if (platform->gpu_active)
+ return;
+
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->gpu_active = IMG_TRUE;
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ rk33_dvfs_record_idle_utils(platform);
+#endif
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+static IMG_BOOL rk33_dvfs_get_freq_table(struct rk_context *platform)
+{
+ IMG_INT i;
+
+ PVR_ASSERT(platform != NULL);
+
+ /*get freq table*/
+ rgx_freq_table = dvfs_get_freq_volt_table(platform->gpu_clk_node);
+
+ if (rgx_freq_table == NULL) {
+ printk("rgx freq table not assigned yet, use default\n");
+ return IMG_FALSE;
+ } else {
+ /*recalculate step*/
+ RGX_DVFS_STEP = 0;
+
+ PVR_DPF((PVR_DBG_WARNING, "The raw GPU freq_table:"));
+ for (i = 0; rgx_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ rgx_dvfs_infotbl[i].clock = rgx_freq_table[i].frequency / ONE_KHZ;
+ PVR_DPF((PVR_DBG_WARNING, "%dM,", rgx_dvfs_infotbl[i].clock));
+ RGX_DVFS_STEP++;
+ }
+
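+ /*
+ * div_dvfs appears to spread the threshold band between level0_threshold_max
+ * and levelf_threshold_max evenly across the available steps; it is
+ * presumably consumed by calculate_dvfs_max_min_threshold() below.
+ */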
+ if (RGX_DVFS_STEP > 1)
+ div_dvfs = round_up(((levelf_threshold_max - level0_threshold_max) / (RGX_DVFS_STEP - 1)), 1);
+
+ PVR_DPF((PVR_DBG_WARNING, "RGX_DVFS_STEP=%d,div_dvfs=%d\n", RGX_DVFS_STEP, div_dvfs));
+
+ for (i = 0; i < RGX_DVFS_STEP; i++)
+ calculate_dvfs_max_min_threshold(i);
+ p_rgx_dvfs_infotbl = rgx_dvfs_infotbl;
+ }
+
+ return IMG_TRUE;
+}
+#else
+static IMG_BOOL rk33_dvfs_get_freq_table(struct rk_context *platform)
+{
+ IMG_INT i;
+ int length;
+ unsigned long rate;
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ PVR_ASSERT(platform != NULL);
+
+
+ RGX_DVFS_STEP = 0;
+
+ rcu_read_lock();
+ length = dev_pm_opp_get_opp_count(dev);
+ if (length <= 0) {
+ PVR_DPF((PVR_DBG_ERROR, "rgx freq table not assigned yet, use default\n"));
+ return IMG_FALSE;
+ }
+
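+ /*
+ * dev_pm_opp_find_freq_ceil() returns the OPP with the lowest frequency
+ * greater than or equal to *rate and updates rate to that frequency, so
+ * incrementing rate by one each iteration walks the OPPs in ascending order.
+ */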
+ for (i = 0, rate = 0; i < length; i++, rate++) {
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ PVR_DPF((PVR_DBG_ERROR, "Error getting opp %d\n", i));
+ break;
+ }
+ rgx_dvfs_infotbl[i].voltage = dev_pm_opp_get_voltage(opp);
+ rgx_dvfs_infotbl[i].clock = clk_round_rate(platform->sclk_gpu_core, rate) / ONE_MHZ;
+ PVR_DPF((PVR_DBG_WARNING, "%dMHz,%duV", rgx_dvfs_infotbl[i].clock, rgx_dvfs_infotbl[i].voltage));
+ RGX_DVFS_STEP++;
+ }
+ rcu_read_unlock();
+
+ if (RGX_DVFS_STEP > 1)
+ div_dvfs = round_up(((levelf_threshold_max - level0_threshold_max) / (RGX_DVFS_STEP - 1)), 1);
+
+ PVR_DPF((PVR_DBG_WARNING, "RGX_DVFS_STEP=%d,div_dvfs=%d\n", RGX_DVFS_STEP, div_dvfs));
+
+ for (i = 0; i < RGX_DVFS_STEP; i++)
+ calculate_dvfs_max_min_threshold(i);
+
+ p_rgx_dvfs_infotbl = rgx_dvfs_infotbl;
+
+ return IMG_TRUE;
+}
+#endif
+
+IMG_BOOL rk33_dvfs_init(struct rk_context *platform)
+{
+ if (!rk33_dvfs_get_freq_table(platform))
+ return IMG_FALSE;
+
+ if (!rgx_dvfs_wq)
+ rgx_dvfs_wq = create_singlethread_workqueue("rgx_dvfs");
+
+ spin_lock_init(&rgx_dvfs_spinlock);
+ rk33_dvfs_utils_init(platform);
+
+ return IMG_TRUE;
+}
+
+
+void rk33_dvfs_term(struct rk_context *platform)
+{
+ rk33_dvfs_utils_term(platform);
+
+ if (rgx_dvfs_wq)
+ destroy_workqueue(rgx_dvfs_wq);
+
+ rgx_dvfs_wq = NULL;
+}
+
+#if RK33_SYSFS_FILE_SUPPORT
+#if RK33_DVFS_FREQ_LIMIT
+static int rk33_dvfs_up_limit_on(struct rk_context *platform, int level)
+{
+ unsigned long flags;
+
+ if (!platform || level < 0)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ if (platform->down_level >= 0 &&
+ platform->down_level > level) {
+ PVR_DPF((PVR_DBG_ERROR, "rk33_dvfs_up_limit_on: cannot set the upper limit (%d) below the lower limit (%d)\n", level, platform->down_level));
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+ return -1;
+ }
+ platform->up_level = level;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ PVR_DPF((PVR_DBG_WARNING, " Up Level Set : %d\n", level));
+
+ return 0;
+}
+
+static int rk33_dvfs_up_limit_off(struct rk_context *platform)
+{
+ unsigned long flags;
+
+ if (!platform)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ platform->up_level = -1;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ PVR_DPF((PVR_DBG_WARNING, "Up Level Unset\n"));
+
+ return 0;
+}
+
+static int rk33_dvfs_down_limit_on(struct rk_context *platform, int level)
+{
+ unsigned long flags;
+
+ if (!platform || level < 0)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ if (platform->up_level >= 0 && platform->up_level < level) {
+ PVR_DPF((PVR_DBG_ERROR, "rk33_dvfs_down_limit_on: cannot set the lower limit (%d) above the upper limit (%d)\n", level, platform->up_level));
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+ return -1;
+ }
+ platform->down_level = level;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ PVR_DPF((PVR_DBG_WARNING, " Down Level Set : %d\n", level));
+
+ return 0;
+}
+
+static int rk33_dvfs_down_limit_off(struct rk_context *platform)
+{
+ unsigned long flags;
+
+ if (!platform)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ platform->down_level = -1;
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ PVR_DPF((PVR_DBG_WARNING, "Down Level Unset\n"));
+
+ return 0;
+}
+
+static int rk33_dvfs_get_dvfs_up_limit_freq(struct rk_context *platform)
+{
+ unsigned long flags;
+ int up_freq = -1;
+
+ if (!platform)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ up_freq = rk33_dvfs_get_freq(platform->up_level);
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ return up_freq;
+}
+
+static int rk33_dvfs_get_dvfs_down_limit_freq(struct rk_context *platform)
+{
+ unsigned long flags;
+ int down_freq = -1;
+
+ if (!platform)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rgx_dvfs_spinlock, flags);
+ down_freq = rk33_dvfs_get_freq(platform->down_level);
+ spin_unlock_irqrestore(&rgx_dvfs_spinlock, flags);
+
+ return down_freq;
+}
+#endif //end of RK33_DVFS_FREQ_LIMIT
+
+#define to_dev_ext_attribute(a) container_of(a, struct dev_ext_attribute, attr)
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ ssize_t ret = 0;
+ unsigned int clkrate;
+ int i;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ if (platform->debug_level > DBG_OFF) {
+#if RK33_DVFS_SUPPORT
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node) {
+ PVR_DPF((PVR_DBG_ERROR, "gpu_clk_node not init!"));
+ return -ENODEV;
+ }
+ clkrate = dvfs_clk_get_rate(platform->gpu_clk_node);
+#else
+ if (!platform->sclk_gpu_core) {
+ PVR_DPF((PVR_DBG_ERROR, "sclk_gpu_core not init!"));
+ return -ENODEV;
+ }
+ clkrate = clk_get_rate(platform->sclk_gpu_core);
+#endif
+#endif
+ if (platform->dvfs_enabled) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "DVFS is on");
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nCurrent clk rgx = %dMhz", clkrate / ONE_MHZ);
+ /* To be revised */
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings:");
+ for (i = 0; i < RGX_DVFS_STEP; i++)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_rgx_dvfs_infotbl[i].clock);
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+ } else {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "DVFS is off");
+ }
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+ }
+ return ret;
+}
+
+static ssize_t set_freq(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ unsigned int freq = 0;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node) {
+#else
+ if (!platform->sclk_gpu_core) {
+#endif
+ PVR_DPF((PVR_DBG_ERROR, "GPU clock source not initialised!"));
+ return -ENODEV;
+ }
+
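+ /*
+ * Accepted inputs (summarising the cases below): "debug_hi"/"debug_lo"/
+ * "debug_off" to change the debug level, "perf"/"dvfs"/"power" to select a
+ * frequency mode, or a frequency in MHz matching a DVFS table entry to pin
+ * the clock (e.g. "echo 400 > freq").
+ */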
+ if (memcmp(buf, "debug_hi", 8) == 0) {
+ platform->debug_level = DBG_HIGH;
+ return count;
+ } else if (memcmp(buf, "debug_lo", 8) == 0) {
+ platform->debug_level = DBG_LOW;
+ return count;
+ } else if (memcmp(buf, "debug_off", 9) == 0) {
+ platform->debug_level = DBG_OFF;
+ return count;
+ } else if (memcmp(buf, "perf", 4) == 0) {
+ set_freq_mode(PERFORMANCE_MODE);
+ return count;
+ } else if (memcmp(buf, "dvfs", 4) == 0) {
+ set_freq_mode(DVFS_MODE);
+ return count;
+ } else if (memcmp(buf, "power", 5) == 0) {
+ set_freq_mode(POWER_MODE);
+ return count;
+ }
+
+ if (sscanf(buf, "%u", &freq) != 1) {
+ PVR_DPF((PVR_DBG_ERROR, "invalid value"));
+ return -EINVAL;
+ } else {
+ if (rk33_dvfs_get_level(freq) >= RGX_DVFS_STEP || rk33_dvfs_get_level(freq) < 0) {
+ PVR_DPF((PVR_DBG_ERROR, "invalid freq(%u)", freq));
+ platform->fix_freq = 0; /* re-enable DVFS */
+ return count;
+ }
+ }
+
+ rk33_dvfs_set_level(platform, rk33_dvfs_get_level(freq));
+
+ platform->fix_freq = freq; /* pin the frequency, disabling DVFS */
+
+ return count;
+}
+
+#if RK33_DVFS_MODE
+int set_freq_mode(int freq_mode)
+{
+ struct rk_context *platform = g_platform;
+ static int s_freq_mode = DVFS_MODE;
+
+ if (!platform)
+ return -ENODEV;
+
+ if (freq_mode == s_freq_mode)
+ return 0;
+
+ switch (freq_mode) {
+ case DVFS_MODE:
+ platform->fix_freq = 0;
+ printk("%s:enter dvfs mode,freq=%d\n", __func__, platform->fix_freq);
+ break;
+ case PERFORMANCE_MODE:
+ platform->fix_freq = rk33_dvfs_get_freq(RGX_DVFS_STEP - 1);
+ printk("%s:enter performance mode,freq=%d\n", __func__, platform->fix_freq);
+ break;
+ case POWER_MODE:
+ platform->fix_freq = rk33_dvfs_get_freq(0);
+ printk("%s:enter power mode,freq=%d\n", __func__, platform->fix_freq);
+ break;
+ default:
+ printk("%s:invalid mode=%d\n", __func__, freq_mode);
+ return -EINVAL;
+ }
+
+ s_freq_mode = freq_mode;
+ return 0;
+}
+#endif
+
+static ssize_t show_utilisation(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_INT utilisation;
+ IMG_UINT32 time_busy;
+ IMG_UINT32 time_idle;
+ ssize_t ret = 0;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform || !platform->dev_config->psDevNode)
+ return -ENODEV;
+
+ if (platform->debug_level > DBG_OFF) {
+ psDevInfo = platform->dev_config->psDevNode->pvDevice;
+
+ /*Measuring GPU Utilisation*/
+ eError = psDevInfo->pfnGetGpuUtilStats(platform->dev_config->psDevNode, ghGpuUtilDvfs, &platform->sUtilStats);
+ if (eError != PVRSRV_OK)
+ return 0;
+ time_busy = platform->sUtilStats.ui64GpuStatActiveHigh + platform->sUtilStats.ui64GpuStatActiveLow;
+ time_idle = platform->sUtilStats.ui64GpuStatIdle + platform->sUtilStats.ui64GpuStatBlocked;
+ /* Avoid a division by zero when no time has been accumulated yet. */
+ utilisation = (time_busy + time_idle) ? (time_busy * 100) / (time_busy + time_idle) : 0;
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Utilisation=%d", utilisation);
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "\nDetail: ActiveHigh=%llu,ActiveLow=%llu,Blocked=%llu,Idle=%llu",
+ platform->sUtilStats.ui64GpuStatActiveHigh,
+ platform->sUtilStats.ui64GpuStatActiveLow,
+ platform->sUtilStats.ui64GpuStatBlocked,
+ platform->sUtilStats.ui64GpuStatIdle);
+
+ if (ret < PAGE_SIZE - 1)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+ }
+ return ret;
+}
+
+static ssize_t set_utilisation(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int utilisation = 0;
+
+ utilisation = simple_strtoul(buf, NULL, 10);
+
+ return count;
+}
+
+static ssize_t show_fbdev(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < num_registered_fb; i++)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "fb[%d] xres=%d, yres=%d, addr=0x%lx\n", i, registered_fb[i]->var.xres, registered_fb[i]->var.yres, registered_fb[i]->fix.smem_start);
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+static ssize_t show_fps(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int ret = 0;
+ IMG_UINT32 fps = 0;
+
+ fps = rk_get_real_fps(0);
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "fps=%d", fps);
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+static enum hrtimer_restart fps_callback(struct hrtimer *timer)
+{
+ struct rk_context *platform;
+ IMG_UINT32 fps = 0;
+
+ PVR_ASSERT(timer != NULL);
+
+ platform = container_of(timer, struct rk_context, fps_timer);
+ PVR_ASSERT(platform != NULL);
+
+ fps = rk_get_real_fps(0);
+ printk("Current fps=%d\n", fps);
+
+ hrtimer_start(timer,
+ HR_TIMER_DELAY_MSEC(platform->fps_gap),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
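+/*
+ * Sysfs "fps" write handler: "on" starts a timer that periodically prints the
+ * current fps, "off" stops it, and a plain number sets the reporting interval
+ * in milliseconds (must be below FPS_MAX_GAP).
+ */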
+static ssize_t set_fps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ static IMG_BOOL bOpen = IMG_FALSE;
+ IMG_UINT gap = FPS_DEFAULT_GAP;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ gap = simple_strtoul(buf, NULL, 10);
+
+ if (sysfs_streq("on", buf) && !bOpen) {
+ bOpen = IMG_TRUE;
+ hrtimer_init(&platform->fps_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ platform->fps_timer.function = fps_callback;
+ hrtimer_start(&platform->fps_timer, HR_TIMER_DELAY_MSEC(platform->fps_gap), HRTIMER_MODE_REL);
+ printk("on fps\n");
+ } else if (sysfs_streq("off", buf)) {
+ printk("off fps\n");
+
+ if (bOpen) {
+ bOpen = IMG_FALSE;
+ hrtimer_cancel(&platform->fps_timer);
+ }
+ } else {
+ if (gap > 0 && gap < FPS_MAX_GAP)
+ platform->fps_gap = gap;
+ }
+
+ return count;
+}
+
+#if RK33_DVFS_FREQ_LIMIT
+static ssize_t show_up_limit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ ssize_t ret = 0;
+ int i;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ if (platform->up_level >= 0 && platform->up_level < RGX_DVFS_STEP)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Up limit freq = %dMhz", rk33_dvfs_get_dvfs_up_limit_freq(platform));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "The up limit level is not set");
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings :");
+ for (i = 0; i < RGX_DVFS_STEP; i++)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_rgx_dvfs_infotbl[i].clock);
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, ", write 'off' to unlock");
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+static ssize_t set_up_limit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ unsigned int freq = 0;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ freq = simple_strtoul(buf, NULL, 10);
+
+ if (sysfs_streq("off", buf))
+ rk33_dvfs_up_limit_off(platform);
+ else
+ rk33_dvfs_up_limit_on(platform, rk33_dvfs_get_level(freq));
+
+ return count;
+}
+
+static ssize_t show_down_limit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ ssize_t ret = 0;
+ int i;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ if (platform->down_level >= 0 && platform->down_level < RGX_DVFS_STEP)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current down limit freq = %dMhz", rk33_dvfs_get_dvfs_down_limit_freq(platform));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "The down limit level is not set");
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings :");
+ for (i = 0; i < RGX_DVFS_STEP; i++)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_rgx_dvfs_infotbl[i].clock);
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "Mhz");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, ", write 'off' to unlock");
+
+ if (ret < PAGE_SIZE - 1) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else {
+ buf[PAGE_SIZE - 2] = '\n';
+ buf[PAGE_SIZE - 1] = '\0';
+ ret = PAGE_SIZE - 1;
+ }
+
+ return ret;
+}
+
+static ssize_t set_down_limit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dev_ext_attribute *ext_attr = to_dev_ext_attribute(attr);
+ struct rk_context *platform;
+ unsigned int freq = 0;
+
+ platform = (struct rk_context *)ext_attr->var;
+ if (!platform)
+ return -ENODEV;
+
+ freq = simple_strtoul(buf, NULL, 10);
+
+ if (sysfs_streq("off", buf))
+ rk33_dvfs_down_limit_off(platform);
+ else
+ rk33_dvfs_down_limit_on(platform, rk33_dvfs_get_level(freq));
+
+ return count;
+}
+#endif
+
+/** The sysfs files freq, util, fbdev and fps (plus the optional DVFS limit files).
+ *
+ * These expose the current operating clock, GPU utilisation and framebuffer
+ * information, and allow the DVFS behaviour to be tuned from user space.
+ */
+
+static struct dev_ext_attribute dev_attr_freq = {
+ .attr = __ATTR(freq, S_IRUGO | S_IWUSR, show_freq, set_freq),
+};
+
+static struct dev_ext_attribute dev_attr_util = {
+ .attr = __ATTR(util, S_IRUGO | S_IWUSR, show_utilisation, set_utilisation),
+};
+
+static struct dev_ext_attribute dev_attr_fbdev = {
+ .attr = __ATTR(fbdev, S_IRUGO, show_fbdev, NULL),
+};
+
+static struct dev_ext_attribute dev_attr_fps = {
+ .attr = __ATTR(fps, S_IRUGO | S_IWUSR, show_fps, set_fps),
+};
+
+#if RK33_DVFS_FREQ_LIMIT
+static struct dev_ext_attribute dev_attr_dvfs_up_limit = {
+ .attr = __ATTR(dvfs_up_limit, S_IRUGO | S_IWUSR, show_up_limit, set_up_limit),
+};
+
+static struct dev_ext_attribute dev_attr_dvfs_down_limit = {
+ .attr = __ATTR(dvfs_down_limit, S_IRUGO | S_IWUSR, show_down_limit, set_down_limit),
+};
+#endif
+
+static IMG_INT rk_create_sysfs_file(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ dev_attr_freq.var = platform;
+ dev_attr_util.var = platform;
+ dev_attr_fbdev.var = platform;
+ dev_attr_fps.var = platform;
+
+#if RK33_DVFS_FREQ_LIMIT
+ dev_attr_dvfs_up_limit.var = platform;
+ dev_attr_dvfs_down_limit.var = platform;
+#endif
+
+ if (device_create_file(dev, &dev_attr_freq.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [freq]\n");
+ goto out;
+ }
+
+ if (device_create_file(dev, &dev_attr_util.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [utilisation]\n");
+ goto out;
+ }
+
+ if (device_create_file(dev, &dev_attr_fbdev.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [fbdev]\n");
+ goto out;
+ }
+
+ if (device_create_file(dev, &dev_attr_fps.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [fps]\n");
+ goto out;
+ }
+
+#if RK33_DVFS_FREQ_LIMIT
+ if (device_create_file(dev, &dev_attr_dvfs_up_limit.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [dvfs_up_limit]\n");
+ goto out;
+ }
+
+ if (device_create_file(dev, &dev_attr_dvfs_down_limit.attr)) {
+ dev_err(dev, "Couldn't create sysfs file [dvfs_down_limit]\n");
+ goto out;
+ }
+#endif
+
+ return 0;
+out:
+ return -ENOENT;
+}
+
+static void rk_remove_sysfs_file(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ device_remove_file(dev, &dev_attr_freq.attr);
+ device_remove_file(dev, &dev_attr_util.attr);
+ device_remove_file(dev, &dev_attr_fbdev.attr);
+ device_remove_file(dev, &dev_attr_fps.attr);
+#if RK33_DVFS_FREQ_LIMIT
+ device_remove_file(dev, &dev_attr_dvfs_up_limit.attr);
+ device_remove_file(dev, &dev_attr_dvfs_down_limit.attr);
+#endif
+}
+
+
+
+#endif //end of RK33_SYSFS_FILE_SUPPORT
+
+#if RK33_USE_RGX_GET_GPU_UTIL
+IMG_BOOL rk33_set_device_node(IMG_HANDLE hDevCookie)
+{
+ struct rk_context *platform = g_platform;
+ unsigned long flags;
+
+ if (platform) {
+ if (hDevCookie != platform->dev_config->psDevNode) {
+ printk("device node does not match, hDevCookie=%p, psDevNode=%p\n",
+ hDevCookie, platform->dev_config->psDevNode);
+ return IMG_FALSE;
+ }
+
+ /*start timer*/
+#if USE_HRTIMER
+ if (platform->dev_config->psDevNode && platform->dvfs_enabled && platform->timer.function && !platform->timer_active) {
+#elif USE_KTHREAD
+ if (platform->dev_config->psDevNode && platform->dvfs_enabled && !platform->timer_active) {
+#endif
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->timer_active = IMG_TRUE;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+ if (ghGpuUtilDvfs == NULL) {
+ if (RGXRegisterGpuUtilStats(&ghGpuUtilDvfs) != PVRSRV_OK) {
+ PVR_DPF((PVR_DBG_ERROR, "PVR_K:%s RGXRegisterGpuUtilStats fail\n", __func__));
+ return IMG_FALSE;
+ }
+ }
+#if USE_HRTIMER
+ hrtimer_start(&platform->timer, HR_TIMER_DELAY_MSEC(RK33_DVFS_FREQ), HRTIMER_MODE_REL);
+#elif USE_KTHREAD
+ wake_up_process(platform->dvfs_task);
+#endif
+ }
+ } else {
+ /* platform is NULL: PVRSRVRegisterPowerDevice was called before RgxRkInit. */
+ PVR_DPF((PVR_DBG_ERROR, "PVR_K:%s platform is null\n", __func__));
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+IMG_BOOL rk33_clear_device_node(void)
+{
+ struct rk_context *platform = g_platform;
+ unsigned long flags;
+
+ if (platform) {
+ /*cancel timer*/
+ if (platform->timer_active && platform->dvfs_enabled) {
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->timer_active = IMG_FALSE;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+#if USE_HRTIMER
+ hrtimer_cancel(&platform->timer);
+#endif
+ }
+ } else {
+ PVR_DPF((PVR_DBG_ERROR, "PVR_K:%s platform is null\n", __func__));
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+#endif //RK33_USE_RGX_GET_GPU_UTIL
+
+#endif //end of RK33_DVFS_SUPPORT
+
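+/*
+ * RgxEnableClock/RgxDisableClock bracket GPU activity: they gate the GPU
+ * clocks, update the busy/idle accounting and, when DVFS is enabled, start or
+ * stop the utilisation sampling timer. RgxDisableClock also drops the GPU to
+ * the lowest frequency unless a fixed frequency was requested via sysfs.
+ */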
+static void RgxEnableClock(struct rk_context *platform)
+{
+#if RK33_DVFS_SUPPORT
+ unsigned long flags;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node && !platform->clk_gpu) {
+ printk("gpu_clk_node and clk_gpu are both null\n");
+ return;
+ }
+#else
+ if (!platform->sclk_gpu_core) {
+ printk("sclk_gpu_core is null\n");
+ return;
+ }
+#endif
+
+ if (platform->aclk_gpu_mem && platform->aclk_gpu_cfg && !platform->gpu_active) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node)
+ dvfs_clk_prepare_enable(platform->gpu_clk_node);
+ else if (platform->clk_gpu)
+ clk_prepare_enable(platform->clk_gpu);
+#else
+ clk_prepare_enable(platform->sclk_gpu_core);
+#endif
+ clk_prepare_enable(platform->aclk_gpu_mem);
+ clk_prepare_enable(platform->aclk_gpu_cfg);
+
+#if RK33_DVFS_SUPPORT
+ rk33_dvfs_record_gpu_active(platform);
+
+ if (platform->dev_config->psDevNode && platform->dvfs_enabled && !platform->timer_active) {
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->timer_active = IMG_TRUE;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+
+#if USE_HRTIMER
+ hrtimer_start(&platform->timer, HR_TIMER_DELAY_MSEC(RK33_DVFS_FREQ), HRTIMER_MODE_REL);
+#endif
+ }
+#endif
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable clock!"));
+ }
+}
+
+static void RgxDisableClock(struct rk_context *platform)
+{
+#if RK33_DVFS_SUPPORT
+ unsigned long flags;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node && !platform->clk_gpu) {
+ printk("gpu_clk_node and clk_gpu are both null\n");
+ return;
+ }
+#else
+ if (!platform->sclk_gpu_core) {
+ printk("sclk_gpu_core is null\n");
+ return;
+ }
+#endif
+
+ if (platform->aclk_gpu_mem && platform->aclk_gpu_cfg && platform->gpu_active) {
+#if RK33_DVFS_SUPPORT
+ if (platform->fix_freq <= 0) {
+ /*Force to drop freq to the lowest.*/
+ rk33_dvfs_set_level(platform, 0);
+ }
+
+ if (platform->dvfs_enabled && platform->timer_active) {
+ spin_lock_irqsave(&platform->timer_lock, flags);
+ platform->timer_active = IMG_FALSE;
+ spin_unlock_irqrestore(&platform->timer_lock, flags);
+
+#if USE_HRTIMER
+ hrtimer_cancel(&platform->timer);
+#endif
+ }
+
+ rk33_dvfs_record_gpu_idle(platform);
+
+#endif
+ clk_disable_unprepare(platform->aclk_gpu_cfg);
+ clk_disable_unprepare(platform->aclk_gpu_mem);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node)
+ dvfs_clk_disable_unprepare(platform->gpu_clk_node);
+ else if (platform->clk_gpu)
+ clk_disable_unprepare(platform->clk_gpu);
+#else
+ clk_disable_unprepare(platform->sclk_gpu_core);
+#endif
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to disable clock!"));
+ }
+}
+
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+/*
+ * The power management
+ * software must power down pd_gpu_1 before power down pd_gpu_0,
+ * and power up pd_gpu_1 after power up pd_gpu_0.
+ */
+static void RgxEnablePower(struct rk_context *platform)
+{
+ if (!platform->bEnablePd && platform->pd_gpu_0 && platform->pd_gpu_1) {
+ clk_prepare_enable(platform->pd_gpu_0);
+ clk_prepare_enable(platform->pd_gpu_1);
+ platform->bEnablePd = IMG_TRUE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd clock!"));
+ }
+}
+
+static void RgxDisablePower(struct rk_context *platform)
+{
+ if (platform->bEnablePd && platform->pd_gpu_0 && platform->pd_gpu_1) {
+ clk_disable_unprepare(platform->pd_gpu_1);
+ clk_disable_unprepare(platform->pd_gpu_0);
+ platform->bEnablePd = IMG_FALSE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to disable gpu_pd clock!"));
+ }
+}
+#else
+static void RgxEnablePower(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+ if (!platform->bEnablePd) {
+ pm_runtime_get_sync(dev);
+ platform->bEnablePd = IMG_TRUE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd power domain!"));
+ }
+}
+
+static void RgxDisablePower(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ if (platform->bEnablePd) {
+ pm_runtime_put(dev);
+ platform->bEnablePd = IMG_FALSE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to disable gpu_pd power domain!"));
+ }
+}
+#endif
+#endif //end of OPEN_GPU_PD
+
+void RgxResume(struct rk_context *platform)
+{
+#if OPEN_GPU_PD
+ RgxEnablePower(platform);
+#endif
+ RgxEnableClock(platform);
+}
+
+void RgxSuspend(struct rk_context *platform)
+{
+ RgxDisableClock(platform);
+#if OPEN_GPU_PD
+ RgxDisablePower(platform);
+#endif
+}
+
+PVRSRV_ERROR RkPrePowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct rk_context *platform = (struct rk_context *)hSysData;
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ RgxResume(platform);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RkPostPowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct rk_context *platform = (struct rk_context *)hSysData;
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ RgxSuspend(platform);
+ return PVRSRV_OK;
+}
+
+struct rk_context *RgxRkInit(PVRSRV_DEVICE_CONFIG *dev_config)
+{
+ struct device *dev = (struct device *)dev_config->pvOSDevice;
+ struct rk_context *platform;
+
+ platform = devm_kzalloc(dev, sizeof(struct rk_context), GFP_KERNEL);
+ if (!platform) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to kzalloc rk_context"));
+ return NULL;
+ }
+ g_platform = platform;
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: dma_mask = %llx", __func__, dev->coherent_dma_mask));
+
+#if RK_TF_VERSION
+ rk_tf_check_version();
+#endif
+
+ platform->dev_config = dev_config;
+
+#if OPEN_GPU_PD
+ platform->bEnablePd = IMG_FALSE;
+#endif
+ platform->cmu_pmu_status = 0;
+
+ spin_lock_init(&platform->cmu_pmu_lock);
+ spin_lock_init(&platform->timer_lock);
+
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ platform->pd_gpu_0 = devm_clk_get(dev, "pd_gpu_0");
+ if (IS_ERR_OR_NULL(platform->pd_gpu_0)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find pd_gpu_0 clock source"));
+ goto fail0;
+ }
+
+ platform->pd_gpu_1 = devm_clk_get(dev, "pd_gpu_1");
+ if (IS_ERR_OR_NULL(platform->pd_gpu_1)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find pd_gpu_1 clock source"));
+ goto fail1;
+ }
+#else
+ pm_runtime_enable(dev);
+#endif
+#endif
+
+ platform->aclk_gpu_mem = devm_clk_get(dev, "aclk_gpu_mem");
+ if (IS_ERR_OR_NULL(platform->aclk_gpu_mem)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find aclk_gpu_mem clock source"));
+ goto fail2;
+ }
+
+ platform->aclk_gpu_cfg = devm_clk_get(dev, "aclk_gpu_cfg");
+ if (IS_ERR_OR_NULL(platform->aclk_gpu_cfg)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find aclk_gpu_cfg clock source"));
+ goto fail3;
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ platform->gpu_clk_node = clk_get_dvfs_node("clk_gpu");
+ if (IS_ERR_OR_NULL(platform->gpu_clk_node)) {
+ platform->dvfs_enabled = IMG_FALSE;
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: GPU Dvfs is disabled"));
+ platform->clk_gpu = devm_clk_get(dev, "clk_gpu");
+ if (IS_ERR_OR_NULL(platform->clk_gpu)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find clk_gpu clock source"));
+ goto fail4;
+ } else {
+ rk33_clk_set_normal_node(platform->clk_gpu, RK33_DEFAULT_CLOCK);
+ }
+ } else {
+#if RK33_DVFS_SUPPORT
+ rk33_dvfs_init(platform);
+#if RK33_SYSFS_FILE_SUPPORT
+ /* create sysfs file node */
+ rk_create_sysfs_file(platform);
+#endif
+#endif /* end of RK33_DVFS_SUPPORT */
+ platform->dvfs_enabled = IMG_TRUE;
+ rk33_clk_set_dvfs_node(platform->gpu_clk_node, RK33_DEFAULT_CLOCK);
+ }
+#else
+ platform->sclk_gpu_core = devm_clk_get(dev, "sclk_gpu_core");
+ if (IS_ERR_OR_NULL(platform->sclk_gpu_core)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find sclk_gpu_core clock source"));
+ goto fail4;
+ }
+
+ platform->gpu_reg = devm_regulator_get_optional(dev, "logic");
+ if (!IS_ERR_OR_NULL(platform->gpu_reg)) {
+ if (dev_pm_opp_of_add_table(dev)) {
+ platform->dvfs_enabled = IMG_FALSE;
+ PVR_DPF((PVR_DBG_ERROR, "Invalid operating-points in device tree."));
+ goto fail5;
+ } else {
+#if RK33_DVFS_SUPPORT
+ rk33_dvfs_init(platform);
+#if RK33_SYSFS_FILE_SUPPORT
+ /* create sysfs file node */
+ rk_create_sysfs_file(platform);
+#endif
+#endif /* end of RK33_DVFS_SUPPORT */
+ platform->dvfs_enabled = IMG_TRUE;
+ }
+ }
+ clk_set_rate(platform->sclk_gpu_core, RK33_DEFAULT_CLOCK * ONE_MHZ);
+#endif
+ clk_set_rate(platform->aclk_gpu_mem, RK33_DEFAULT_CLOCK * ONE_MHZ);
+ clk_set_rate(platform->aclk_gpu_cfg, RK33_DEFAULT_CLOCK * ONE_MHZ);
+
+ RgxResume(platform);
+
+ return platform;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+fail5:
+ devm_clk_put(dev, platform->sclk_gpu_core);
+ platform->sclk_gpu_core = NULL;
+#endif
+fail4:
+ devm_clk_put(dev, platform->aclk_gpu_cfg);
+ platform->aclk_gpu_cfg = NULL;
+fail3:
+ devm_clk_put(dev, platform->aclk_gpu_mem);
+ platform->aclk_gpu_mem = NULL;
+fail2:
+
+#if OPEN_GPU_PD && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ devm_clk_put(dev, platform->pd_gpu_1);
+ platform->pd_gpu_1 = NULL;
+fail1:
+ devm_clk_put(dev, platform->pd_gpu_0);
+ platform->pd_gpu_0 = NULL;
+fail0:
+ devm_kfree(dev, platform);
+ return NULL;
+#else
+ devm_kfree(dev, platform);
+ return NULL;
+#endif //end of OPEN_GPU_PD
+
+}
+
+void RgxRkUnInit(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ RgxSuspend(platform);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node) {
+ clk_put_dvfs_node(platform->gpu_clk_node);
+ platform->gpu_clk_node = NULL;
+ } else if (platform->clk_gpu) {
+ devm_clk_put(dev, platform->clk_gpu);
+ platform->clk_gpu = NULL;
+ }
+#else
+ if (platform->sclk_gpu_core) {
+ devm_clk_put(dev, platform->sclk_gpu_core);
+ platform->sclk_gpu_core = NULL;
+ }
+#endif
+
+ if (platform->aclk_gpu_cfg) {
+ devm_clk_put(dev, platform->aclk_gpu_cfg);
+ platform->aclk_gpu_cfg = NULL;
+ }
+ if (platform->aclk_gpu_mem) {
+ devm_clk_put(dev, platform->aclk_gpu_mem);
+ platform->aclk_gpu_mem = NULL;
+ }
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->pd_gpu_1) {
+ devm_clk_put(dev, platform->pd_gpu_1);
+ platform->pd_gpu_1 = NULL;
+ }
+ if (platform->pd_gpu_0) {
+ devm_clk_put(dev, platform->pd_gpu_0);
+ platform->pd_gpu_0 = NULL;
+ }
+#else
+ pm_runtime_disable(dev);
+#endif
+#endif
+
+ if (platform->dvfs_enabled) {
+#if RK33_DVFS_SUPPORT
+#if RK33_SYSFS_FILE_SUPPORT
+ rk_remove_sysfs_file(platform);
+#endif
+ rk33_dvfs_term(platform);
+#endif
+ }
+ devm_kfree(dev, platform);
+}
+
+
+#if defined(SUPPORT_ION)
+struct ion_device *g_psIonDev;
+
+PVRSRV_ERROR IonInit(void *phPrivateData)
+{
+ g_psIonDev = NULL;
+ return PVRSRV_OK;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+ return g_psIonDev;
+}
+
+void IonDevRelease(struct ion_device *psIonDev)
+{
+ /* Nothing to do, sanity check the pointer we're passed back */
+ PVR_ASSERT(psIonDev == g_psIonDev);
+}
+
+void IonDeinit(void)
+{
+ g_psIonDev = NULL;
+}
+#endif /* defined(SUPPORT_ION) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RK Initialisation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Initialisation routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RK_INIT__)
+#define __RK_INIT__
+
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include "rgxdevice.h"
+#include "device.h"
+
+
+/****************************************************************************
+ Rockchip modifications
+*****************************************************************************/
+#define RK33_DVFS_SUPPORT 1 /* 1:DVFS on 0:DVFS off*/
+#define RK33_SYSFS_FILE_SUPPORT 1 /* 1:add information nodes in /sys/devices/ffa30000.gpu/ */
+#define RK33_DVFS_MODE 1
+
+/*RK33_USE_RGX_GET_GPU_UTIL and RK33_USE_CUSTOMER_GET_GPU_UTIL are mutually exclusive*/
+#define RK33_USE_RGX_GET_GPU_UTIL 1
+#define RK33_USE_CUSTOMER_GET_GPU_UTIL 0
+#define RK33_USE_CL_COUNT_UTILS 0
+#define OPEN_GPU_PD 1
+/*USE_KTHREAD and USE_HRTIMER are mutually exclusive*/
+#define USE_KTHREAD 0
+#define USE_HRTIMER 1
+#define RK_TF_VERSION 0
+#define USE_PVR_SPEED_CHANGE 0
+#define RK33_MAX_UTILIS 4
+#define RK33_DVFS_FREQ 50
+#define RK33_DEFAULT_CLOCK 400
+#define RK33_DVFS_FREQ_LIMIT 1
+#define RGX_DVFS_CURRENT_FREQ 0
+#define FPS_DEFAULT_GAP 300
+#define FPS_MAX_GAP 5000
+#define LIMIT_FPS 60
+#define LIMIT_FPS_POWER_SAVE 50
+#define ONE_KHZ 1000
+#define ONE_MHZ 1000000
+#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
+#define RGX_DVFS_LEVEL_INTERVAL 2
+/* Shift used for rk_context.time_busy/time_idle - units of (1 << 8) ns.
+ This gives a maximum period between samples of 2^(32+8)/100 ns = slightly under 11s.
+ Exceeding this will cause overflow */
+#define RK_PM_TIME_SHIFT 8
+#define RK_EXPORT_API(func) EXPORT_SYMBOL(func);
+
+
+typedef struct _rgx_dvfs_info {
+ IMG_UINT voltage;
+ IMG_UINT clock;
+ IMG_INT min_threshold;
+ IMG_INT max_threshold;
+ IMG_UINT64 time;
+ IMG_UINT coef;
+} rgx_dvfs_info;
+
+typedef struct _rgx_dvfs_status_type {
+ IMG_INT step;
+ IMG_INT utilisation;
+ IMG_UINT32 temperature;
+ IMG_UINT32 temperature_time;
+#if 0
+ IMG_INT upper_lock;
+ IMG_INT under_lock;
+#endif
+
+} rgx_dvfs_status;
+
+enum {
+ DBG_OFF = 0,
+ DBG_LOW,
+ DBG_HIGH,
+};
+
+struct rk_utilis {
+ IMG_INT utilis[RK33_MAX_UTILIS];
+ IMG_INT time_busys[RK33_MAX_UTILIS];
+ IMG_INT time_idles[RK33_MAX_UTILIS];
+};
+
+struct rk_context {
+ PVRSRV_DEVICE_CONFIG *dev_config;
+
+ /*Indicator if system clock to gpu is active*/
+ IMG_INT cmu_pmu_status;
+ /* CMU & PMU lock */
+ spinlock_t cmu_pmu_lock;
+ /*Timer*/
+ spinlock_t timer_lock;
+
+#if OPEN_GPU_PD
+ IMG_BOOL bEnablePd;
+ struct clk *pd_gpu_0;
+ struct clk *pd_gpu_1;
+#endif
+ struct clk *aclk_gpu_mem;
+ struct clk *aclk_gpu_cfg;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ struct clk *clk_gpu;
+ struct dvfs_node *gpu_clk_node;
+#else
+ struct clk *sclk_gpu_core;
+ struct regulator *gpu_reg;
+#endif
+ RGXFWIF_GPU_UTIL_STATS sUtilStats;
+ IMG_BOOL gpu_active;
+ IMG_BOOL dvfs_enabled;
+
+#if RK33_DVFS_SUPPORT
+#if RK33_USE_CUSTOMER_GET_GPU_UTIL
+ ktime_t time_period_start;
+#endif
+
+ struct work_struct rgx_dvfs_work;
+
+ /*Temperature*/
+ IMG_UINT32 temperature;
+ IMG_UINT32 temperature_time;
+
+ /*timer*/
+#if USE_HRTIMER
+ struct hrtimer timer;
+#endif
+ IMG_BOOL timer_active;
+
+ /*dvfs kthread*/
+#if USE_KTHREAD
+ struct task_struct *dvfs_task;
+ wait_queue_head_t dvfs_wait;
+#endif
+
+ /* Utilisation accounting over the DVFS sampling period */
+ IMG_INT freq_level;
+ IMG_INT freq;
+ IMG_INT time_tick;
+ struct rk_utilis stUtilis;
+ IMG_INT utilisation;
+ IMG_UINT32 time_busy;
+ IMG_UINT32 time_idle;
+
+#if RK33_USE_CL_COUNT_UTILS
+ IMG_UINT32 abs_load[4];
+#endif
+
+ /*sysfs things*/
+#if RK33_SYSFS_FILE_SUPPORT
+#if RK33_DVFS_FREQ_LIMIT
+ IMG_INT up_level;
+ IMG_INT down_level;
+#endif //end of RK33_DVFS_FREQ_LIMIT
+ IMG_INT debug_level;
+ struct hrtimer fps_timer;
+ IMG_UINT fps_gap;
+ IMG_INT fix_freq;
+#endif /*end of RK33_SYSFS_FILE_SUPPORT*/
+
+#endif /*end of RK33_DVFS_SUPPORT*/
+};
+
+struct rk_context *RgxRkInit(PVRSRV_DEVICE_CONFIG *dev_config);
+void RgxRkUnInit(struct rk_context *platform);
+void RgxResume(struct rk_context *platform);
+void RgxSuspend(struct rk_context *platform);
+
+IMG_BOOL rk33_dvfs_init(struct rk_context *platform);
+void rk33_dvfs_term(struct rk_context *platform);
+
+#if RK33_DVFS_MODE
+#define DVFS_MODE (0)
+#define PERFORMANCE_MODE (1)
+#define POWER_MODE (2)
+int set_freq_mode(int freq_mode);
+#endif
+
+#if RK33_DVFS_SUPPORT && RK33_USE_RGX_GET_GPU_UTIL
+IMG_BOOL rk33_set_device_node(IMG_HANDLE hDevCookie);
+IMG_BOOL rk33_clear_device_node(void);
+#endif
+
+PVRSRV_ERROR IonInit(void *pvPrivateData);
+void IonDeinit(void);
+PVRSRV_ERROR RkPrePowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+PVRSRV_ERROR RkPostPowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+#endif /* __RK_INIT__ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+@Description System Configuration functions
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_ION)
+#include "ion_sys.h"
+#endif /* defined(SUPPORT_ION) */
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+
+#include <linux/clkdev.h>
+#include <linux/hardirq.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/rockchip/dvfs.h>
+#include <linux/rockchip/common.h>
+#include "power.h"
+#include "rk_init_v2.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+#include <linux/clk-private.h>
+#else
+#include <linux/clk-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
+#endif
+#include <linux/devfreq_cooling.h>
+#include <linux/thermal.h>
+#include "rgxdevice.h"
+
+#if !defined(PVR_DVFS) && !defined(SUPPORT_PDVFS)
+typedef struct
+{
+ IMG_UINT32 ui32Volt;
+ IMG_UINT32 ui32Freq;
+} IMG_OPP;
+#endif
+
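+/*
+ * Voltage/frequency operating points for the RGX core. Frequencies are in Hz;
+ * the voltage unit is not stated here (the values suggest millivolts).
+ */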
+static const IMG_OPP rkOPPTable[] =
+{
+ { 925, 100000000},
+ { 925, 160000000},
+ { 1025, 266000000},
+ { 1075, 350000000},
+ { 1125, 400000000},
+ { 1200, 500000000},
+};
+
+#define RGX_DVFS_STEP (sizeof(rkOPPTable) / sizeof(rkOPPTable[0]))
+
+
+#if defined(PVR_DVFS)
+ #define DEFAULT_MIN_VF_LEVEL 0
+#else
+ #define DEFAULT_MIN_VF_LEVEL 4
+#endif
+
+//static IMG_UINT32 min_vf_level_val = DEFAULT_MIN_VF_LEVEL;
+//static IMG_UINT32 max_vf_level_val = RGX_DVFS_STEP - 1;
+
+static struct rk_context *g_platform;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+static int rk33_clk_set_normal_node(struct clk *node, unsigned long rate)
+{
+ int ret;
+
+ if (!node) {
+ printk("rk33_clk_set_normal_node: node is null\n");
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(node, rate);
+ if (ret)
+ printk("clk_set_rate error\n");
+
+ return ret;
+}
+
+static int rk33_clk_set_dvfs_node(struct dvfs_node *node, unsigned long rate)
+{
+ int ret;
+
+ if (!node) {
+ printk("rk33_clk_set_dvfs_node: node is null\n");
+ return -EINVAL;
+ }
+
+ ret = dvfs_clk_set_rate(node, rate);
+ if (ret)
+ printk("dvfs_clk_set_rate error\n");
+
+ return ret;
+}
+
+void rkSetFrequency(IMG_UINT32 ui32Frequency)
+{
+	unsigned long old_freq;
+
+	if (NULL == g_platform)
+		panic("oops");
+
+	if (!g_platform->aclk_gpu_mem || !g_platform->aclk_gpu_cfg) {
+		pr_err("%s: aclk_gpu_mem or aclk_gpu_cfg not initialised\n", __func__);
+		return;
+	}
+	if (!g_platform->gpu_clk_node && !g_platform->clk_gpu) {
+		pr_err("%s: clk_gpu and gpu_clk_node are both NULL\n", __func__);
+		return;
+	}
+
+#if USE_PVR_SPEED_CHANGE
+	if (PVRSRVDevicePreClockSpeedChange(g_platform->dev_config->psDevNode,
+					    IMG_TRUE, NULL) != PVRSRV_OK)
+		return;
+#endif
+
+	if (g_platform->gpu_clk_node)
+		old_freq = clk_get_rate(g_platform->gpu_clk_node->clk);
+	else
+		old_freq = clk_get_rate(g_platform->clk_gpu);
+
+	/* Drop the GPU core clock before the aclk clocks, raise it after them. */
+	if (old_freq > ui32Frequency) {
+		if (g_platform->gpu_clk_node)
+			rk33_clk_set_dvfs_node(g_platform->gpu_clk_node, ui32Frequency);
+		else
+			rk33_clk_set_normal_node(g_platform->clk_gpu, ui32Frequency);
+	}
+
+	rk33_clk_set_normal_node(g_platform->aclk_gpu_mem, ui32Frequency);
+	rk33_clk_set_normal_node(g_platform->aclk_gpu_cfg, ui32Frequency);
+
+	if (old_freq < ui32Frequency) {
+		if (g_platform->gpu_clk_node)
+			rk33_clk_set_dvfs_node(g_platform->gpu_clk_node, ui32Frequency);
+		else
+			rk33_clk_set_normal_node(g_platform->clk_gpu, ui32Frequency);
+	}
+
+#if USE_PVR_SPEED_CHANGE
+	PVRSRVDevicePostClockSpeedChange(g_platform->dev_config->psDevNode,
+					 IMG_TRUE, NULL);
+#endif
+}
+
+void rkSetVoltage(IMG_UINT32 ui32Volt)
+{
+	if (NULL == g_platform)
+		panic("oops");
+
+	/* No GPU regulator is acquired on kernels < 4.4; guard against NULL. */
+	if (!g_platform->gpu_reg) {
+		PVR_DPF((PVR_DBG_WARNING, "No GPU regulator, cannot set voltage=%d", ui32Volt));
+		return;
+	}
+
+	if (regulator_set_voltage(g_platform->gpu_reg, ui32Volt, ui32Volt) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set gpu power voltage=%d!", ui32Volt));
+	}
+}
+#else
+void rkSetFrequency(IMG_UINT32 ui32Frequency)
+{
+ int ret = 0;
+	unsigned long old_freq;
+	int old_volt;
+
+ if (NULL == g_platform)
+ panic("oops");
+
+ old_freq = clk_get_rate(g_platform->sclk_gpu_core);
+ old_volt = regulator_get_voltage(g_platform->gpu_reg);
+
+ ret = clk_set_rate(g_platform->aclk_gpu_mem, ui32Frequency);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set aclk_gpu_mem rate: %d\n", ret));
+ if (old_volt > 0)
+ regulator_set_voltage(g_platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+ ret = clk_set_rate(g_platform->aclk_gpu_cfg, ui32Frequency);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set aclk_gpu_cfg rate: %d\n", ret));
+ clk_set_rate(g_platform->aclk_gpu_mem, old_freq);
+ if (old_volt > 0)
+ regulator_set_voltage(g_platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+ ret = clk_set_rate(g_platform->sclk_gpu_core, ui32Frequency);
+ if (ret) {
+ PVR_DPF((PVR_DBG_ERROR, "failed to set sclk_gpu_core rate: %d\n", ret));
+ clk_set_rate(g_platform->aclk_gpu_mem, old_freq);
+ clk_set_rate(g_platform->aclk_gpu_cfg, old_freq);
+ if (old_volt > 0)
+ regulator_set_voltage(g_platform->gpu_reg, old_volt, INT_MAX);
+ return;
+ }
+}
+
+void rkSetVoltage(IMG_UINT32 ui32Volt)
+{
+ if (NULL == g_platform)
+ panic("oops");
+
+ if(regulator_set_voltage(g_platform->gpu_reg, ui32Volt, INT_MAX) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set gpu power voltage=%d!",ui32Volt));
+ }
+}
+#endif
+
+
+static void RgxEnableClock(struct rk_context *platform)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node && !platform->clk_gpu)
+ {
+ printk("gpu_clk_node and clk_gpu are both null\n");
+ return;
+ }
+#else
+ if (!platform->sclk_gpu_core)
+ {
+ printk("sclk_gpu_core is null\n");
+ return;
+ }
+#endif
+
+ if (platform->aclk_gpu_mem && platform->aclk_gpu_cfg && !platform->gpu_active) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node)
+ dvfs_clk_prepare_enable(platform->gpu_clk_node);
+ else if (platform->clk_gpu)
+ clk_prepare_enable(platform->clk_gpu);
+#else
+ clk_prepare_enable(platform->sclk_gpu_core);
+#endif
+ clk_prepare_enable(platform->aclk_gpu_mem);
+ clk_prepare_enable(platform->aclk_gpu_cfg);
+ platform->gpu_active = IMG_TRUE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable clock!"));
+ }
+}
+
+static void RgxDisableClock(struct rk_context *platform)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (!platform->gpu_clk_node && !platform->clk_gpu) {
+ printk("gpu_clk_node and clk_gpu are both null\n");
+ return;
+ }
+#else
+ if (!platform->sclk_gpu_core) {
+ printk("sclk_gpu_core is null");
+ return;
+ }
+#endif
+
+ if (platform->aclk_gpu_mem && platform->aclk_gpu_cfg && platform->gpu_active) {
+ clk_disable_unprepare(platform->aclk_gpu_cfg);
+ clk_disable_unprepare(platform->aclk_gpu_mem);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node)
+ dvfs_clk_disable_unprepare(platform->gpu_clk_node);
+ else if (platform->clk_gpu)
+ clk_disable_unprepare(platform->clk_gpu);
+#else
+ clk_disable_unprepare(platform->sclk_gpu_core);
+#endif
+ platform->gpu_active = IMG_FALSE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to disable clock!"));
+ }
+}
+
+
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+/*
+ * The power management
+ * software must power down pd_gpu_1 before power down pd_gpu_0,
+ * and power up pd_gpu_1 after power up pd_gpu_0.
+ */
+static void RgxEnablePower(struct rk_context *platform)
+{
+ if (!platform->bEnablePd && platform->pd_gpu_0 && platform->pd_gpu_1) {
+ clk_prepare_enable(platform->pd_gpu_0);
+ clk_prepare_enable(platform->pd_gpu_1);
+ platform->bEnablePd = IMG_TRUE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd clock!"));
+ }
+}
+
+static void RgxDisablePower(struct rk_context *platform)
+{
+ if (platform->bEnablePd && platform->pd_gpu_0 && platform->pd_gpu_1) {
+ clk_disable_unprepare(platform->pd_gpu_1);
+ clk_disable_unprepare(platform->pd_gpu_0);
+ platform->bEnablePd = IMG_FALSE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd clock!"));
+ }
+}
+#else
+static void RgxEnablePower(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+ if (!platform->bEnablePd) {
+ pm_runtime_get_sync(dev);
+ platform->bEnablePd = IMG_TRUE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd clock!"));
+ }
+}
+
+static void RgxDisablePower(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+ if (platform->bEnablePd) {
+ pm_runtime_put(dev);
+ platform->bEnablePd = IMG_FALSE;
+ } else {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to enable gpu_pd clock!"));
+ }
+}
+#endif
+#endif //end of OPEN_GPU_PD
+
+void RgxResume(struct rk_context *platform)
+{
+#if OPEN_GPU_PD
+ RgxEnablePower(platform);
+#endif
+ RgxEnableClock(platform);
+}
+
+void RgxSuspend(struct rk_context *platform)
+{
+ RgxDisableClock(platform);
+#if OPEN_GPU_PD
+ RgxDisablePower(platform);
+#endif
+}
+
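+/*
+ * Pre/post power-state callbacks. In this patch they are wired up through
+ * psDevConfig->pfnPrePowerState/pfnPostPowerState in sysconfig.c, so Services
+ * powers the Rockchip clocks and power domain up before turning the device on
+ * and releases them after the device has been turned off.
+ */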
+PVRSRV_ERROR RkPrePowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct rk_context *platform = (struct rk_context *)hSysData;
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ RgxResume(platform);
+ return PVRSRV_OK;
+
+}
+
+PVRSRV_ERROR RkPostPowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct rk_context *platform = (struct rk_context *)hSysData;
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ RgxSuspend(platform);
+ return PVRSRV_OK;
+}
+
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(PVR_DVFS)
+/*
+ * This model is primarily designed for the Juno platform. It may not be
+ * suitable for other platforms.
+ */
+
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+static u32 dynamic_coefficient;
+static u32 static_coefficient;
+static s32 ts[4];
+static struct thermal_zone_device *gpu_tz;
+
+static unsigned long model_static_power(unsigned long voltage)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+ unsigned long temperature;
+#else
+ int temperature;
+#endif
+ unsigned long temp;
+ unsigned long temp_squared, temp_cubed, temp_scaling_factor;
+ const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
+
+ if (gpu_tz) {
+ int ret;
+
+ ret = gpu_tz->ops->get_temp(gpu_tz, &temperature);
+ if (ret) {
+ pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n",
+ ret);
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+ } else {
+ temperature = FALLBACK_STATIC_TEMPERATURE;
+ }
+
+ /* Calculate the temperature scaling factor. To be applied to the
+ * voltage scaled power.
+ */
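+	/*
+	 * Illustrative example (coefficients are hypothetical, not from this
+	 * driver): with ts = { 20000, 2000, -20, 2 } and a reading of 55000
+	 * (55 degrees C), temp = 55 and the factor evaluates to
+	 * 2*55^3 - 20*55^2 + 2000*55 + 20000 = 402250, which then scales the
+	 * voltage-cubed static power term below.
+	 */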
+ temp = temperature / 1000;
+ temp_squared = temp * temp;
+ temp_cubed = temp_squared * temp;
+ temp_scaling_factor =
+ (ts[3] * temp_cubed)
+ + (ts[2] * temp_squared)
+ + (ts[1] * temp)
+ + ts[0];
+
+ return (((static_coefficient * voltage_cubed) >> 20)
+ * temp_scaling_factor)
+ / 1000000;
+}
+
+static unsigned long model_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+{
+ /* The inputs: freq (f) is in Hz, and voltage (v) in mV.
+ * The coefficient (c) is in mW/(MHz mV mV).
+ *
+ * This function calculates the dynamic power after this formula:
+ * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz)
+ */
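+	/*
+	 * Illustrative example (numbers are hypothetical): with
+	 * dynamic_coefficient = 100, voltage = 900 mV and freq = 500 MHz,
+	 * v2 = (900 * 900) / 1000 = 810 and the result is
+	 * (100 * 810 * 500) / 1000000 = 40 mW (integer division).
+	 */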
+ const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */
+ const unsigned long f_mhz = freq / 1000000; /* MHz */
+
+ return (dynamic_coefficient * v2 * f_mhz) / 1000000; /* mW */
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+struct devfreq_cooling_ops rk_power_model_simple_ops = {
+#else
+struct devfreq_cooling_power rk_power_model_simple_ops = {
+#endif
+ .get_static_power = model_static_power,
+ .get_dynamic_power = model_dynamic_power,
+};
+
+int rk_power_model_simple_init(struct device *dev)
+{
+ struct device_node *power_model_node;
+ const char *tz_name;
+ u32 static_power, dynamic_power;
+ u32 voltage, voltage_squared, voltage_cubed, frequency;
+
+ power_model_node = of_get_child_by_name(dev->of_node,
+ "power_model");
+ if (!power_model_node) {
+ dev_err(dev, "could not find power_model node\n");
+ return -ENODEV;
+ }
+ if (!of_device_is_compatible(power_model_node,
+ "arm,mali-simple-power-model")) {
+ dev_err(dev, "power_model incompatible with simple power model\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_string(power_model_node, "thermal-zone",
+ &tz_name)) {
+ dev_err(dev, "ts in power_model not available\n");
+ return -EINVAL;
+ }
+
+ gpu_tz = thermal_zone_get_zone_by_name(tz_name);
+ if (IS_ERR(gpu_tz)) {
+ pr_warn_ratelimited("Error getting gpu thermal zone (%ld), not yet ready?\n",
+ PTR_ERR(gpu_tz));
+ gpu_tz = NULL;
+
+ return -EPROBE_DEFER;
+ }
+
+ if (of_property_read_u32(power_model_node, "static-power",
+ &static_power)) {
+ dev_err(dev, "static-power in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "dynamic-power",
+ &dynamic_power)) {
+ dev_err(dev, "dynamic-power in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "voltage",
+ &voltage)) {
+ dev_err(dev, "voltage in power_model not available\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(power_model_node, "frequency",
+ &frequency)) {
+ dev_err(dev, "frequency in power_model not available\n");
+ return -EINVAL;
+ }
+ voltage_squared = (voltage * voltage) / 1000;
+ voltage_cubed = voltage * voltage * voltage;
+ static_coefficient = (static_power << 20) / (voltage_cubed >> 10);
+ dynamic_coefficient = (((dynamic_power * 1000) / voltage_squared)
+ * 1000) / frequency;
+
+ if (of_property_read_u32_array(power_model_node, "ts", (u32 *)ts, 4)) {
+ dev_err(dev, "ts in power_model not available\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+void RgxRkUnInit(struct rk_context *platform)
+{
+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice;
+
+ RgxSuspend(platform);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->gpu_clk_node) {
+ clk_put_dvfs_node(platform->gpu_clk_node);
+ platform->gpu_clk_node = NULL;
+ } else if (platform->clk_gpu) {
+ devm_clk_put(dev, platform->clk_gpu);
+ platform->clk_gpu = NULL;
+ }
+#else
+ if (platform->sclk_gpu_core) {
+ devm_clk_put(dev, platform->sclk_gpu_core);
+ platform->sclk_gpu_core = NULL;
+ }
+#endif
+
+ if (platform->aclk_gpu_cfg) {
+ devm_clk_put(dev, platform->aclk_gpu_cfg);
+ platform->aclk_gpu_cfg = NULL;
+ }
+ if (platform->aclk_gpu_mem) {
+ devm_clk_put(dev, platform->aclk_gpu_mem);
+ platform->aclk_gpu_mem = NULL;
+ }
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ if (platform->pd_gpu_1) {
+ devm_clk_put(dev, platform->pd_gpu_1);
+ platform->pd_gpu_1 = NULL;
+ }
+ if (platform->pd_gpu_0) {
+ devm_clk_put(dev, platform->pd_gpu_0);
+ platform->pd_gpu_0 = NULL;
+ }
+#else
+ pm_runtime_disable(dev);
+#endif
+#endif
+ devm_kfree(dev, platform);
+
+}
+
+struct rk_context *RgxRkInit(PVRSRV_DEVICE_CONFIG* psDevConfig)
+{
+ struct device *dev = (struct device *)psDevConfig->pvOSDevice;
+ struct rk_context *platform;
+ RGX_DATA* psRGXData = (RGX_DATA*)psDevConfig->hDevData;
+
+ platform = devm_kzalloc(dev, sizeof(struct rk_context), GFP_KERNEL);
+ if (NULL == platform) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to kzalloc rk_context"));
+ return NULL;
+ }
+
+ g_platform = platform;
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: dma_mask = %llx", __func__, dev->coherent_dma_mask));
+
+ platform->dev_config = psDevConfig;
+ platform->gpu_active = IMG_FALSE;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+ //psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = rkOPPTable;
+ //psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = RGX_DVFS_STEP;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = rkSetFrequency;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = rkSetVoltage;
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ psDevConfig->sDVFS.sDVFSDeviceCfg.psPowerOps = &rk_power_model_simple_ops;
+#endif
+#endif
+
+#if OPEN_GPU_PD
+ platform->bEnablePd = IMG_FALSE;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ platform->pd_gpu_0 = devm_clk_get(dev, "pd_gpu_0");
+ if (IS_ERR_OR_NULL(platform->pd_gpu_0)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find pd_gpu_0 clock source"));
+ goto fail0;
+ }
+
+ platform->pd_gpu_1 = devm_clk_get(dev, "pd_gpu_1");
+ if (IS_ERR_OR_NULL(platform->pd_gpu_1)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find pd_gpu_1 clock source"));
+ goto fail1;
+ }
+#else
+ pm_runtime_enable(dev);
+#endif
+#endif
+
+ platform->aclk_gpu_mem = devm_clk_get(dev, "aclk_gpu_mem");
+ if (IS_ERR_OR_NULL(platform->aclk_gpu_mem)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find aclk_gpu_mem clock source"));
+ goto fail2;
+ }
+
+ platform->aclk_gpu_cfg = devm_clk_get(dev, "aclk_gpu_cfg");
+ if (IS_ERR_OR_NULL(platform->aclk_gpu_cfg)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find aclk_gpu_cfg clock source"));
+ goto fail3;
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ platform->gpu_clk_node = clk_get_dvfs_node("clk_gpu");
+ if (IS_ERR_OR_NULL(platform->gpu_clk_node))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: GPU Dvfs is disabled"));
+ platform->clk_gpu = devm_clk_get(dev, "clk_gpu");
+ if (IS_ERR_OR_NULL(platform->clk_gpu)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find clk_gpu clock source"));
+ goto fail4;
+ } else {
+ rk33_clk_set_normal_node(platform->clk_gpu, RK33_DEFAULT_CLOCK);
+ }
+ } else {
+ rk33_clk_set_dvfs_node(platform->gpu_clk_node, RK33_DEFAULT_CLOCK);
+ }
+#else
+ platform->sclk_gpu_core = devm_clk_get(dev, "sclk_gpu_core");
+ if (IS_ERR_OR_NULL(platform->sclk_gpu_core)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to find sclk_gpu_core clock source"));
+ goto fail4;
+ }
+
+ platform->gpu_reg = devm_regulator_get_optional(dev, "logic");
+ if (IS_ERR_OR_NULL(platform->gpu_reg)) {
+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: devm_regulator_get_optional failed."));
+ goto fail5;
+ }
+
+ clk_set_rate(platform->sclk_gpu_core, RK33_DEFAULT_CLOCK * ONE_MHZ);
+
+ if(psRGXData && psRGXData->psRGXTimingInfo)
+ {
+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed = clk_get_rate(platform->sclk_gpu_core);
+ psRGXData->psRGXTimingInfo->ui32CoreVoltage = regulator_get_voltage(platform->gpu_reg);
+ }
+#endif
+ clk_set_rate(platform->aclk_gpu_mem, RK33_DEFAULT_CLOCK * ONE_MHZ);
+ clk_set_rate(platform->aclk_gpu_cfg, RK33_DEFAULT_CLOCK * ONE_MHZ);
+
+	/* Power the GPU up through the same path used for runtime transitions. */
+	(void) RkPrePowerState(platform,
+			       PVRSRV_DEV_POWER_STATE_ON,
+			       PVRSRV_DEV_POWER_STATE_DEFAULT,
+			       IMG_FALSE);
+
+ return platform;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+fail5:
+ devm_clk_put(dev, platform->sclk_gpu_core);
+ platform->sclk_gpu_core = NULL;
+#endif
+fail4:
+ devm_clk_put(dev, platform->aclk_gpu_cfg);
+ platform->aclk_gpu_cfg = NULL;
+fail3:
+ devm_clk_put(dev, platform->aclk_gpu_mem);
+ platform->aclk_gpu_mem = NULL;
+fail2:
+#if OPEN_GPU_PD
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+	devm_clk_put(dev, platform->pd_gpu_1);
+	platform->pd_gpu_1 = NULL;
+fail1:
+	devm_clk_put(dev, platform->pd_gpu_0);
+	platform->pd_gpu_0 = NULL;
+fail0:
+#else
+	pm_runtime_disable(dev);
+#endif
+#endif /* OPEN_GPU_PD */
+	devm_kfree(dev, platform);
+	return NULL;
+
+}
+
+
+#if defined(SUPPORT_ION)
+struct ion_device *g_psIonDev;
+
+PVRSRV_ERROR IonInit(void *phPrivateData)
+{
+ g_psIonDev = NULL;
+ return PVRSRV_OK;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+ return g_psIonDev;
+}
+
+void IonDevRelease(struct ion_device *psIonDev)
+{
+ /* Nothing to do, sanity check the pointer we're passed back */
+ PVR_ASSERT(psIonDev == g_psIonDev);
+}
+
+void IonDeinit(void)
+{
+ g_psIonDev = NULL;
+}
+#endif /* defined(SUPPORT_ION) */
+
--- /dev/null
+/*************************************************************************/ /*!
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+@Description System Configuration functions
+*/ /**************************************************************************/
+
+#if !defined(__RK_INIT_V2_H__)
+#define __RK_INIT_V2_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device.h"
+#include "servicesext.h"
+#include <linux/version.h>
+
+#define OPEN_GPU_PD 1
+#define RK33_DEFAULT_CLOCK 400
+#define ONE_KHZ 1000
+#define ONE_MHZ 1000000
+#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
+
+struct rk_context {
+ PVRSRV_DEVICE_CONFIG *dev_config;
+#if OPEN_GPU_PD
+ IMG_BOOL bEnablePd;
+ struct clk *pd_gpu_0;
+ struct clk *pd_gpu_1;
+#endif
+	struct clk *aclk_gpu_mem;
+	struct clk *aclk_gpu_cfg;
+	/* May be NULL on kernels < 4.4, where no regulator is acquired. */
+	struct regulator *gpu_reg;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+	struct clk *clk_gpu;
+	struct dvfs_node *gpu_clk_node;
+#else
+	struct clk *sclk_gpu_core;
+#endif
+
+	/* IMG_TRUE while the GPU clocks are enabled */
+ IMG_BOOL gpu_active;
+};
+
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(PVR_DVFS)
+int rk_power_model_simple_init(struct device *dev);
+#endif
+
+long int GetConfigFreq(void);
+IMG_UINT32 AwClockFreqGet(IMG_HANDLE hSysData);
+struct rk_context * RgxRkInit(PVRSRV_DEVICE_CONFIG* psDevConfig);
+void RgxRkUnInit(struct rk_context *platform);
+void RgxResume(struct rk_context *platform);
+void RgxSuspend(struct rk_context *platform);
+PVRSRV_ERROR RkPrePowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+PVRSRV_ERROR RkPostPowerState(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+void rkSetFrequency(IMG_UINT32 ui32Frequency);
+void rkSetVoltage(IMG_UINT32 ui32Voltage);
+#endif /* __RK_INIT_V2_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "interrupt_support.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#include "rk_init_v2.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+#include <linux/platform_device.h>
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
+static RGX_TIMING_INFORMATION gsRGXTimingInfo;
+static RGX_DATA gsRGXData;
+static PVRSRV_DEVICE_CONFIG gsDevices[1];
+
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs;
+#if defined(TDMETACODE)
+static PHYS_HEAP_CONFIG gsPhysHeapConfig[3];
+#else
+static PHYS_HEAP_CONFIG gsPhysHeapConfig[1];
+#endif
+
+/*
+ CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/*
+ Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ if (gsDevices[0].pvOSDevice)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ /*
+ * Setup information about physical memory heap(s) we have
+ */
+ gsPhysHeapFuncs.pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr;
+ gsPhysHeapFuncs.pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr;
+
+ gsPhysHeapConfig[0].ui32PhysHeapID = 0;
+ gsPhysHeapConfig[0].pszPDumpMemspaceName = "SYSMEM";
+ gsPhysHeapConfig[0].eType = PHYS_HEAP_TYPE_UMA;
+ gsPhysHeapConfig[0].psMemFuncs = &gsPhysHeapFuncs;
+ gsPhysHeapConfig[0].hPrivData = NULL;
+
+#if defined(TDMETACODE)
+ gsPhysHeapConfig[1].ui32PhysHeapID = 1;
+ gsPhysHeapConfig[1].pszPDumpMemspaceName = "TDMETACODEMEM";
+ gsPhysHeapConfig[1].eType = PHYS_HEAP_TYPE_UMA;
+ gsPhysHeapConfig[1].psMemFuncs = &gsPhysHeapFuncs;
+ gsPhysHeapConfig[1].hPrivData = NULL;
+
+ gsPhysHeapConfig[2].ui32PhysHeapID = 2;
+ gsPhysHeapConfig[2].pszPDumpMemspaceName = "TDSECUREBUFMEM";
+ gsPhysHeapConfig[2].eType = PHYS_HEAP_TYPE_UMA;
+ gsPhysHeapConfig[2].psMemFuncs = &gsPhysHeapFuncs;
+ gsPhysHeapConfig[2].hPrivData = NULL;
+#endif
+
+ /*
+ * Setup RGX specific timing data
+ */
+ gsRGXTimingInfo.ui32CoreClockSpeed = RGX_RK_CORE_CLOCK_SPEED;
+ gsRGXTimingInfo.bEnableActivePM = IMG_TRUE;
+ gsRGXTimingInfo.bEnableRDPowIsland = IMG_FALSE;
+ gsRGXTimingInfo.ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /*
+ * Setup RGX specific data
+ */
+ gsRGXData.psRGXTimingInfo = &gsRGXTimingInfo;
+#if defined(TDMETACODE)
+ gsRGXData.bHasTDMetaCodePhysHeap = IMG_TRUE;
+ gsRGXData.uiTDMetaCodePhysHeapID = 1;
+
+ gsRGXData.bHasTDSecureBufPhysHeap = IMG_TRUE;
+ gsRGXData.uiTDSecureBufPhysHeapID = 2;
+#endif
+
+ /*
+ * Setup RGX device
+ */
+ gsDevices[0].pvOSDevice = pvOSDevice;
+ gsDevices[0].pszName = "rk3368";
+
+ /* Device setup information */
+ gsDevices[0].sRegsCpuPBase.uiAddr = RK_GPU_PBASE;
+ gsDevices[0].ui32RegsSize = RK_GPU_SIZE;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ gsDevices[0].ui32IRQ = RK_IRQ_GPU;
+#else
+ gsDevices[0].ui32IRQ = platform_get_irq(gpsPVRLDMDev, 0);
+#endif
+
+
+ /* Device's physical heaps */
+ gsDevices[0].pasPhysHeaps = &gsPhysHeapConfig[0];
+ gsDevices[0].ui32PhysHeapCount = IMG_ARR_NUM_ELEMS(gsPhysHeapConfig);
+
+ /* Device's physical heap IDs */
+ gsDevices[0].aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0;
+ gsDevices[0].aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 0;
+
+ gsDevices[0].eBIFTilingMode = geBIFTilingMode;
+ gsDevices[0].pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+ gsDevices[0].ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+	/* System-level power management callbacks */
+ gsDevices[0].pfnPrePowerState = RkPrePowerState;
+ gsDevices[0].pfnPostPowerState = RkPostPowerState;
+
+	/* No system clock frequency query callback */
+ gsDevices[0].pfnClockFreqGet = NULL;
+
+ gsDevices[0].pfnCheckMemAllocSize = NULL;
+
+ gsDevices[0].hDevData = &gsRGXData;
+
+ /* Rk Init */
+ gsDevices[0].hSysData = (IMG_HANDLE)RgxRkInit(&gsDevices[0]);
+ if (!gsDevices[0].hSysData)
+ {
+ gsDevices[0].pvOSDevice = NULL;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if defined(PVR_DVFS)
+ gsDevices[0].sDVFS.sDVFSDeviceCfg.ui32PollMs = 100;
+ gsDevices[0].sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_FALSE;
+
+ gsDevices[0].sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ gsDevices[0].sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ /* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+ IonInit(NULL);
+#endif
+
+ *ppsDevConfig = &gsDevices[0];
+
+ return PVRSRV_OK;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+
+ /* Rk UnInit */
+ RgxRkUnInit(psDevConfig->hSysData);
+ psDevConfig->hSysData = NULL;
+
+#if defined(SUPPORT_ION)
+ IonDeinit();
+#endif
+
+ psDevConfig->pvOSDevice = NULL;
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData,
+ SYS_IRQ_FLAG_TRIGGER_DEFAULT);
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ return OSUninstallSystemLISR(hLISRData);
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCCONFIG_H__)
+#define __SYSCCONFIG_H__
+
+/* RK3368 platform-specific register, IRQ and clock parameters */
+#define RGX_RK_CORE_CLOCK_SPEED (100*1000*1000)
+#define RK_GPU_PBASE 0xffa30000
+#define RK_GPU_SIZE 0x10000
+#define RK_IRQ_GPU 40
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100)
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+ 0, /* BIF tiling heap 1 x-stride */
+ 1, /* BIF tiling heap 2 x-stride */
+ 2, /* BIF tiling heap 3 x-stride */
+ 3 /* BIF tiling heap 4 x-stride */
+};
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* __SYSCCONFIG_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#define SYS_RGX_OF_COMPATIBLE "arm,rogue-G6110"
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME "rgxrk3368"
+#endif
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(_ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ROGUE_TRACE_EVENTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+ ({ \
+ u64 t = ns + (NSEC_PER_USEC / 2); \
+ do_div(t, NSEC_PER_SEC); \
+ t; \
+ })
+
+#define show_usecs_from_ns(ns) \
+ ({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+ u32 rem; \
+ do_div(t, NSEC_PER_USEC); \
+ rem = do_div(t, USEC_PER_SEC); \
+ })
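+
+/*
+ * The two helpers above split a nanosecond timestamp into the "sec.usec"
+ * pair consumed by the TP_printk() format strings below ("ts=%llu.%06lu").
+ * Illustrative example (not from the original source): for ns = 1234567890,
+ * show_secs_from_ns() yields 1 and show_usecs_from_ns() yields 234568
+ * (both round to the nearest microsecond first), so the event is printed
+ * as "ts=1.234568".
+ */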
+
+void trace_fence_update_enabled_callback(void);
+void trace_fence_update_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_update,
+
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ u32 sync_fwaddr, u32 sync_value),
+
+ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( cmd, cmd )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ __field( u32, offset )
+ __field( u32, sync_fwaddr )
+ __field( u32, sync_value )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(cmd, cmd);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ __entry->offset = offset;
+ __entry->sync_fwaddr = sync_fwaddr;
+ __entry->sync_value = sync_value;
+ ),
+
+ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ __get_str(comm),
+ __get_str(cmd),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->offset,
+ (unsigned long)__entry->sync_fwaddr,
+ (unsigned long)__entry->sync_value),
+
+ trace_fence_update_enabled_callback,
+ trace_fence_update_disabled_callback
+);
+
+void trace_fence_check_enabled_callback(void);
+void trace_fence_check_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_check,
+
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ u32 sync_fwaddr, u32 sync_value),
+
+ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( cmd, cmd )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ __field( u32, offset )
+ __field( u32, sync_fwaddr )
+ __field( u32, sync_value )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(cmd, cmd);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ __entry->offset = offset;
+ __entry->sync_fwaddr = sync_fwaddr;
+ __entry->sync_value = sync_value;
+ ),
+
+ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ __get_str(comm),
+ __get_str(cmd),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->offset,
+ (unsigned long)__entry->sync_fwaddr,
+ (unsigned long)__entry->sync_value),
+
+ trace_fence_check_enabled_callback,
+ trace_fence_check_disabled_callback
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+ TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+
+ TP_ARGS(comm, dm, ctx_id),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ ),
+
+ TP_printk("comm=%s dm=%s ctx_id=%lu",
+ __get_str(comm),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id)
+);
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+TRACE_EVENT_FN(rogue_ufo_update,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 old_value, u32 new_value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, old_value, new_value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, old_value )
+ __field( u32, new_value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->old_value = old_value;
+ __entry->new_value = new_value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "old_value=%#lx new_value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->old_value,
+ (unsigned long)__entry->new_value),
+ PVRGpuTraceEnableUfoCallback,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_fail,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 value, u32 required),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ __field( u32, required )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ __entry->required = required;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "value=%#lx required=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value,
+ (unsigned long)__entry->required),
+ PVRGpuTraceEnableUfoCallback,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 value, u32 required),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ __field( u32, required )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ __entry->required = required;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "value=%#lx required=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value,
+ (unsigned long)__entry->required),
+ PVRGpuTraceEnableUfoCallback,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_success,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value),
+ PVRGpuTraceEnableUfoCallback,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_success,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value),
+ PVRGpuTraceEnableUfoCallback,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT(rogue_events_lost,
+
+ TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+
+ TP_ARGS(event_source, last_ordinal, curr_ordinal),
+
+ TP_STRUCT__entry(
+ __field( u32, event_source )
+ __field( u32, last_ordinal )
+ __field( u32, curr_ordinal )
+ ),
+
+ TP_fast_assign(
+ __entry->event_source = event_source;
+ __entry->last_ordinal = last_ordinal;
+ __entry->curr_ordinal = curr_ordinal;
+ ),
+
+ TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+ __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+ __entry->last_ordinal,
+ __entry->curr_ordinal)
+);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+TRACE_EVENT_FN(rogue_firmware_activity,
+
+ TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+
+ TP_ARGS(timestamp, task, fw_event),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __string( task, task )
+ __field( u32, fw_event )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __assign_str(task, task);
+ __entry->fw_event = fw_event;
+ ),
+
+ TP_printk("ts=%llu.%06lu task=%s event=%s",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ __get_str(task),
+ __print_symbolic(__entry->fw_event,
+ /* These values are from pvr_gputrace.h. */
+ { 1, "begin" },
+ { 2, "end" })),
+
+ PVRGpuTraceEnableFirmwareActivityCallback,
+ PVRGpuTraceDisableFirmwareActivityCallback
+);
+
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services API Kernel mode Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exported services API details
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/ /**************************************************************************/
+
+#ifndef SERVICES_KM_H
+#define SERVICES_KM_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that
+ it is always page-aligned */
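+
+/*
+ * Example (illustrative only): the _ALIGNSHIFT values can be used to round an
+ * address down to its page boundary, e.g.
+ *   (uiAddr >> PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT) << PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT
+ * truncates uiAddr to a 4K-aligned address, since 1 << 12 == PVRSRV_4K_PAGE_SIZE.
+ */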
+
+/*!
+ * Forward declaration (look on connection.h)
+ */
+typedef struct _PVRSRV_DEV_CONNECTION_ PVRSRV_DEV_CONNECTION;
+
+/*!
+ Flags for Services connection.
+	Allows a per-client policy to be defined for Services.
+*/
+#define SRV_FLAGS_INIT_PROCESS (1U << 1) /*!< Allows connect to succeed if SrvInit
+ * has not yet run (used by SrvInit itself) */
+
+#define SRV_WORKEST_ENABLED (1U << 2) /*!< If Workload Estimation is enabled */
+#define SRV_PDVFS_ENABLED (1U << 3) /*!< If PDVFS is enabled */
+
+#define SRV_NO_HWPERF_CLIENT_STREAM (1U << 4) /*!< Don't create HWPerf for this connection */
+
+/*
+ * Bits 20 - 27 are used to pass information needed for validation
+ * of the GPU Virtualisation Validation mechanism. In particular:
+ *
+ * Bits:
+ * [20 - 22]: OSid of the memory region that will be used for allocations
+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
+ * regarding that memory context.
+ * [26]: If the AXI Protection register will be set to secure for that OSid
+ * [27]: If the Emulator Wrapper Register checking for protection violation
+ * will be set to secure for that OSid
+ */
+
+#define VIRTVAL_FLAG_OSID_SHIFT (20)
+#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT)
+
+#define VIRTVAL_FLAG_OSIDREG_SHIFT (23)
+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPREG_SHIFT (26)
+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPTD_SHIFT (27)
+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
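+
+/*
+ * Example (illustrative only): a client that wants allocations placed in the
+ * OSid-3 memory region while the firmware emits OSid 2 for that memory
+ * context would set
+ *   ((3U << VIRTVAL_FLAG_OSID_SHIFT) & SRV_VIRTVAL_FLAG_OSID_MASK) |
+ *   ((2U << VIRTVAL_FLAG_OSIDREG_SHIFT) & SRV_VIRTVAL_FLAG_OSIDREG_MASK)
+ * in its connection flags, leaving the AXI protection bits clear.
+ */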
+
+#define SRV_FLAGS_PDUMPCTRL (1U << 31) /*!< PDump Ctrl client flag */
+
+/*
+ Pdump flags which are accessible to Services clients
+*/
+#define PDUMP_NONE 0x00000000UL /*!< No flags */
+
+#define PDUMP_CONT 0x40000000UL /*!< Output this entry always regardless of framed capture range,
+                                     used by client applications being dumped. */
+
+/* Status of the device. */
+typedef enum
+{
+ PVRSRV_DEVICE_STATUS_UNKNOWN, /* status of the device is unknown */
+ PVRSRV_DEVICE_STATUS_OK, /* the device is operational */
+ PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
+ PVRSRV_DEVICE_STATUS_DEVICE_ERROR /* the device is not operational */
+} PVRSRV_DEVICE_STATUS;
+
+#endif /* SERVICES_KM_H */
+/**************************************************************************//**
+End of file (services_km.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services definitions required by external drivers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides services data structures, defines and prototypes
+ required by external drivers
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY (1) /*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ * Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+ PVRSRV_SERVICES_STATE_OK = 0,
+ PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ * States for power management
+ *****************************************************************************/
+/*!
+ System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+ PVRSRV_SYS_POWER_STATE_Unspecified = -1, /*!< Unspecified : Uninitialised */
+ PVRSRV_SYS_POWER_STATE_OFF = 0, /*!< Off */
+ PVRSRV_SYS_POWER_STATE_ON = 1, /*!< On */
+
+ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+ Device Power State Enum
+ */
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+ PVRSRV_DEV_POWER_STATE_DEFAULT = -1, /*!< Default state for the device */
+ PVRSRV_DEV_POWER_STATE_OFF = 0, /*!< Unpowered */
+ PVRSRV_DEV_POWER_STATE_ON = 1, /*!< Running */
+
+ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE; /*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+
+
+/* Power transition handler prototypes */
+
+/*!
+ Typedef for a pointer to a Function that will be called before a transition
+ from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+/*!
+ Typedef for a pointer to a Function that will be called after a transition
+ from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
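+
+/*
+ * A minimal sketch of a handler matching the PFN_PRE_POWER signature; the
+ * ExampleDevPrePower name and the quiesce step are hypothetical and only
+ * illustrate the expected calling convention:
+ *
+ *   static PVRSRV_ERROR ExampleDevPrePower(IMG_HANDLE hDevHandle,
+ *                                          PVRSRV_DEV_POWER_STATE eNewPowerState,
+ *                                          PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ *                                          IMG_BOOL bForced)
+ *   {
+ *       if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) &&
+ *           (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
+ *       {
+ *           // quiesce the device before clocks and power are removed
+ *       }
+ *       return PVRSRV_OK;
+ *   }
+ */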
+
+/* Clock speed handler prototypes */
+
+/*!
+ Typedef for a pointer to a Function that will be called before a transition
+ from one clockspeed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+ Typedef for a pointer to a Function that will be called after a transition
+ from one clockspeed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+ Typedef for a pointer to a function that will be called to transition the device
+ to a forced idle state. Used in conjunction with (forced) power requests, DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE hDevHandle,
+ IMG_BOOL bDeviceOffPermitted);
+
+/*!
+ Typedef for a pointer to a function that will be called to cancel a forced idle state
+ and return the firmware back to a state where the hardware can be scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE hDevHandle);
+
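+/*!
+ Typedef for a pointer to a function that will be called to request a change
+ in the device's dust count. See also PFN_FORCED_IDLE_REQUEST.
+ */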
+typedef PVRSRV_ERROR (*PFN_DUST_COUNT_REQUEST) (IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32DustCount);
+
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct _PVRSRV_REGISTRY_INFO
+{
+ IMG_UINT32 ui32DevCookie;
+ IMG_PCHAR pszKey;
+ IMG_PCHAR pszValue;
+ IMG_PCHAR pszBuf;
+ IMG_UINT32 ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* __SERVICESEXT_H__ */
+/*****************************************************************************
+ End of file (servicesext.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title PVR Common Bridge Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements core PVRSRV API, server side
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "srvcore.h"
+#include "rgxinit.h"
+#include "pvrsrv.h"
+#include "power.h"
+#include "lists.h"
+#include "rgxdevice.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "device_connection.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services_km.h"
+#endif
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env,*,pvr_bridge_k.c
+ */
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { { .pfFunction = DummyBW }, };
+
+#define PVR_DISPATCH_OFFSET_FIRST_FUNC 0
+#define PVR_DISPATCH_OFFSET_LAST_FUNC 1
+#define PVR_DISPATCH_OFFSET_ARRAY_MAX 2
+
+#define PVR_BUFFER_POOL_MAX 10
+
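+/* Each pool entry wraps a single allocation of
+ * PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE bytes: the bridge
+ * input buffer starts at offset 0 and the output buffer at offset
+ * PVRSRV_MAX_BRIDGE_IN_SIZE (see _BridgePoolAcquireBuffer()). Entries are
+ * allocated lazily, so at most PVR_BUFFER_POOL_MAX bridge calls that do not
+ * use the global bridge buffers can be serviced concurrently.
+ */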
+typedef struct
+{
+ IMG_BOOL bTaken;
+ void *pvBuffer;
+} PVR_POOL_BUFFER;
+
+static struct
+{
+ POS_LOCK hLock;
+ IMG_UINT uiCount;
+ PVR_POOL_BUFFER asPool[PVR_BUFFER_POOL_MAX];
+} *g_psBridgePool = NULL;
+
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX];
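+
+/*
+ * Illustrative mapping (explanation only): an incoming bridge call identified
+ * by (ui32BridgeID = PVRSRV_BRIDGE_SYNC, ui32FunctionID = 2) is dispatched to
+ *
+ *   g_BridgeDispatchTable[g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC]
+ *                                                          [PVR_DISPATCH_OFFSET_FIRST_FUNC] + 2]
+ *
+ * provided the resulting index does not exceed the group's
+ * PVR_DISPATCH_OFFSET_LAST_FUNC boundary (see BridgedDispatchKM()).
+ */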
+
+#if defined(DEBUG_BRIDGE_KM)
+/* a lock used for protecting bridge call timing calculations
+ * for calls which do not acquire a lock
+ */
+POS_LOCK g_hStatsLock;
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+void BridgeDispatchTableStartOffsetsInit(void)
+{
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST;
+#if defined(SUPPORT_RGX)
+ /* Need a gap here to start next entry at element 128 */
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXINIT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXINIT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST;
+#endif
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32ClientBuildOptions,
+ IMG_UINT32 ui32ClientDDKVersion,
+ IMG_UINT32 ui32ClientDDKBuild,
+ IMG_UINT8 *pui8KernelArch,
+ IMG_UINT32 *pui32CapabilityFlags,
+ IMG_UINT32 *ui32PVRBridges,
+ IMG_UINT32 *ui32RGXBridges)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
+ IMG_UINT32 ui32DDKVersion, ui32DDKBuild;
+ PVRSRV_DATA *psSRVData = NULL;
+ IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
+ static IMG_BOOL bIsFirstConnection=IMG_FALSE;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ /* Clear the flags */
+ *pui32CapabilityFlags = 0;
+ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+ psSRVData = PVRSRVGetPVRSRVData();
+
+ psConnection->ui32ClientFlags = ui32Flags;
+
+ /* output the available bridges */
+ *ui32PVRBridges = gui32PVRBridges;
+ *ui32RGXBridges = gui32RGXBridges;
+
+ /* Set flags to pass back to the client showing which cache coherency is available. */
+ /* Is the system CPU cache coherent? */
+ if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG;
+ }
+ /* Is the system device cache coherent? */
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG;
+ }
+ /* Does the system device have non-mappable local memory? */
+ if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG;
+ }
+
+ /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */
+ if (! psDeviceNode->ui64GeneralSVMHeapSize || ! ui64ProcessVASpaceSize)
+ {
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED;
+ }
+ else
+ {
+ if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapSize)
+ {
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED;
+ }
+ else
+ {
+ /* This can happen when the processor has more virtual address bits
+ than the device (i.e. an allocation is not always guaranteed to succeed) */
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL;
+ }
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProtReg = IMG_FALSE;
+
+ IMG_PID pIDCurrent = OSGetCurrentClientProcessIDKM();
+
+ ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT);
+ ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT);
+
+#if defined(EMULATOR)
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+ {
+ IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0;
+
+ ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT);
+ ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s",
+ ui32OSidReg,
+ (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE"));
+
+ bOSidAxiProtReg = ui32OSidAxiProtReg == 1;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s",
+ ui32OSidReg,
+ bOSidAxiProtReg?"TRUE":"FALSE"));
+
+ SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+ }
+
+#endif
+
+ InsertPidOSidsCoupling(pIDCurrent, ui32OSid, ui32OSidReg, bOSidAxiProtReg);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"[GPU Virtualization Validation]: OSIDs: %d, %d\n",ui32OSid, ui32OSidReg));
+}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Only enable if enabled in the UM */
+ if(ui32Flags & SRV_WORKEST_ENABLED)
+ {
+ psDevInfo->bWorkEstEnabled = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Workload Estimation disabled. Not enabled in UM."));
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS)
+ /* Only enable if enabled in the UM */
+ if(ui32Flags & SRV_PDVFS_ENABLED)
+ {
+ psDevInfo->bPDVFSEnabled = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Proactive DVFS disabled. Not enabled in UM."));
+ }
+#endif
+
+ if (ui32Flags & SRV_FLAGS_INIT_PROCESS)
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+#else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Connecting as init process", __func__));
+ if ((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Rejecting init process", __func__));
+ eError = PVRSRV_ERROR_SRV_CONNECT_FAILED;
+ goto chk_exit;
+ }
+#if defined (__linux__) || defined(INTEGRITY_OS)
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
+#endif
+ }
+ else
+ {
+ /*
+ * This check has to be done here (before the client-against-kernel check),
+ * while the client options have not yet been modified
+ */
+#if !defined(PVRSRV_GPUVIRT_GUESTDRV)
+ eError = RGXClientConnectCompatCheck_ClientAgainstFW(psDeviceNode, ui32ClientBuildOptions);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Mismatch between client and firmware build options.",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+#endif
+
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ if (!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ goto chk_exit;
+ }
+ }
+ else
+ {
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_RETRY;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_RETRY;
+ goto chk_exit;
+ }
+ }
+ }
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) */
+
+ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+ ui32DDKBuild = PVRVERSION_BUILD;
+
+ if(IMG_FALSE == bIsFirstConnection)
+ {
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK)? \
+ BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK)? \
+ BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+ }
+
+ /* Mask out every option that is not kernel-specific */
+ ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+
+ /*
+ * Validate the build options
+ */
+ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+ if (ui32BuildOptions != ui32ClientBuildOptions)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+ /* Mask the debug flag option out as we do support combinations of debug vs. release in UM & KM */
+ ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK;
+#endif
+ if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+ "extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+ __FUNCTION__,
+ ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+ "extra options present in KM driver: (0x%x). Please check rgx_options.h",
+ __FUNCTION__,
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+ if(IMG_FALSE == bIsFirstConnection)
+ {
+ PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.",
+ __FUNCTION__,
+ ui32ClientBuildOptions,
+ (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
+ ui32BuildOptions,
+ (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
+ __FUNCTION__,
+ ui32ClientBuildOptions,
+ ui32BuildOptions));
+
+ }
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __FUNCTION__));
+ }
+
+ /*
+ * Validate DDK version
+ */
+ if (ui32ClientDDKVersion != ui32DDKVersion)
+ {
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).",
+ __FUNCTION__,
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+ PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ PVR_DBG_BREAK;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]",
+ __FUNCTION__,
+ PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+ }
+
+ /* Create a stream for every connection except for the special clients
+ * that don't need it, e.g. recipients of HWPerf data. */
+ if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM))
+ {
+ IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+ PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
+ psConnection->pid);
+ eError = TLStreamCreate(&psConnection->hClientTLStream, acStreamName,
+ 131072, TL_FLAG_ALLOCATE_ON_FIRST_OPEN, NULL,
+ NULL, NULL, NULL);
+ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not create private TL stream (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ psConnection->hClientTLStream = NULL;
+ }
+ /* Reset error status. We don't want to propagate any errors from here. */
+ eError = PVRSRV_OK;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName));
+ }
+
+ /*
+ * Validate DDK build
+ */
+ if (ui32ClientDDKBuild != ui32DDKBuild)
+ {
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).",
+ __FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+ PVR_DBG_BREAK;
+ goto chk_exit;
+#endif
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]",
+ __FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+ }
+
+ /* Success so far, so is it the PDump client that is connecting? */
+ if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+ {
+ PDumpConnectionNotify();
+ }
+
+ PVR_ASSERT(pui8KernelArch != NULL);
+ /* Can't use __SIZEOF_POINTER__ here as it is not defined on Windows */
+ if (sizeof(void *) == 8)
+ {
+ *pui8KernelArch = 64;
+ }
+ else
+ {
+ *pui8KernelArch = 32;
+ }
+
+ bIsFirstConnection = IMG_TRUE;
+
+#if defined(DEBUG_BRIDGE_KM)
+ {
+ int ii;
+
+ /* dump dispatch table offset lookup table */
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __FUNCTION__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+ for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+ }
+ }
+#endif
+
+chk_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void)
+{
+ /* just return OK, per-process data is cleaned up by resmgr */
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireGlobalEventObjectKM
+@Description Acquire the global event object.
+@Output phGlobalEventObject On success, points to the global event
+ object handle
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error
+ otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ *phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseGlobalEventObjectKM
+@Description Release the global event object.
+@Output hGlobalEventObject Global event object handle
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ PVR_ASSERT(psPVRSRVData->hGlobalEventObject == hGlobalEventObject);
+
+ return PVRSRV_OK;
+}
+
+/*
+ PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32VerbLevel)
+{
+ if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ PVR_LOG(("User requested PVR debug info"));
+
+ PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDevClockSpeedKM: "
+ "Could not get device clock speed (%d)!",
+ eError));
+ }
+
+ return eError;
+}
+
+
+/*
+ PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+ PVR_LOG(("User requested OS reset"));
+ OSPanic();
+#endif
+ PVR_LOG(("HW operation timeout, dump server info"));
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MEDIUM, NULL, NULL);
+ return PVRSRV_OK;
+}
+
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection)
+{
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32DispatchTableEntry));
+#endif
+ return PVRSRV_ERROR_BRIDGE_ENOTTY;
+}
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32AlignChecksSize,
+ IMG_UINT32 aui32AlignChecks[])
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS)
+
+ PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL);
+ return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize,
+ aui32AlignChecks);
+
+#else
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize);
+ PVR_UNREFERENCED_PARAMETER(aui32AlignChecks);
+
+ return PVRSRV_OK;
+
+#endif /* !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS) */
+
+}
+#endif /* defined(SUPPORT_KERNEL_SRVINIT) */
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32DeviceStatus)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* First try to update the status. */
+ if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+ {
+ PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to "
+ "check for device status (%d)", eError));
+
+ /* Return unknown status and error because we don't know what
+ * happened and if the status is valid. */
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+ return eError;
+ }
+ }
+
+ switch (OSAtomicRead(&psDeviceNode->eHealthStatus))
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK;
+ return PVRSRV_OK;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING;
+ return PVRSRV_OK;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR;
+ return PVRSRV_OK;
+ default:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+ return PVRSRV_ERROR_INTERNAL_ERROR;
+ }
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ * error checking.
+ *
+ * @param ui32BridgeGroup
+ * @param ui32Index
+ * @param pszIOCName
+ * @param pfFunction
+ * @param pszFunctionName
+ * @param hBridgeLock
+ * @param pszBridgeLockName
+ * @param bUseLock
+ *
+ * @return None
+ ********************************************************************************/
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName,
+ POS_LOCK hBridgeLock,
+ const IMG_CHAR *pszBridgeLockName,
+ IMG_BOOL bUseLock)
+{
+ static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */
+
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+ PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
+ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ /* Enable this to dump out the dispatch table entries */
+ PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __FUNCTION__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+ /* Any gaps are sub-optimal in terms of memory usage, but we are mainly
+ * interested in spotting any large gap of wasted memory that could be
+ * accidentally introduced.
+ *
+ * This will currently flag up any gaps > 5 entries.
+ *
+ * NOTE: This shouldn't be debug only since switching from debug->release
+ * etc is likely to modify the available ioctls and thus be a point where
+ * mistakes are exposed. This isn't run at a performance critical time.
+ */
+ if((ui32PrevIndex != IMG_UINT32_MAX) &&
+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+ (ui32Index <= ui32PrevIndex)))
+ {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+ ui32Index, pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+ }
+
+ if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+ __FUNCTION__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+ __FUNCTION__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu\n",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGX_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+ OSPanic();
+ }
+
+ /* Panic if the previous entry has been overwritten as this is not allowed!
+ * NOTE: This shouldn't be debug only since switching from debug->release
+ * etc is likely to modify the available ioctls and thus be a point where
+ * mistakes are exposed. This isn't run at a performance critical time.
+ */
+ if(g_BridgeDispatchTable[ui32Index].pfFunction)
+ {
+ if(g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction)
+ {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
+ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+#else
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). (current pfn=<%p>, new pfn=<%p>)",
+ __FUNCTION__, pszIOCName, ui32Index,
+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+ PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+ OSPanic();
+ }
+ }
+ else
+ {
+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+ g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+ g_BridgeDispatchTable[ui32Index].bUseLock = bUseLock;
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+ }
+
+ ui32PrevIndex = ui32Index;
+}
+
+PVRSRV_ERROR
+PVRSRVInitSrvDisconnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_BOOL bInitSuccesful,
+ IMG_UINT32 ui32ClientBuildOptions)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+ if (psConnection)
+ {
+ /* Assume this is being called by a user space process */
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+#else
+ if (!(psConnection->ui32ClientFlags & SRV_FLAGS_INIT_PROCESS))
+ {
+ return PVRSRV_ERROR_SRV_DISCONNECT_FAILED;
+ }
+
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
+#endif
+
+ eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful);
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL,
+ (eError == PVRSRV_OK) && bInitSuccesful);
+#endif
+
+ return eError;
+}
+
+static PVRSRV_ERROR _BridgeBufferPoolCreate(void)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "BridgePoolCreate: Creating bridge buffer pool."));
+
+ g_psBridgePool = OSAllocZMemNoStats(sizeof(*g_psBridgePool));
+ if (g_psBridgePool == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgePoolCreate: Failed to allocate memory "
+ "for the bridge buffer pool."));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = OSLockCreate(&g_psBridgePool->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgePoolCreate: Failed to create lock "
+ "for the bridge buffer pool."));
+ OSFreeMemNoStats(g_psBridgePool);
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+static void _BridgeBufferPoolDestroy(void)
+{
+ IMG_UINT i;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "Destroying bridge buffer pool."));
+
+ for (i = 0; i < g_psBridgePool->uiCount; i++)
+ OSFreeMem(g_psBridgePool->asPool[i].pvBuffer);
+
+ OSLockDestroy(g_psBridgePool->hLock);
+ OSFreeMemNoStats(g_psBridgePool);
+}
+
+PVRSRV_ERROR BridgeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = _BridgeBufferPoolCreate();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to create bridge buffer pool"));
+ return eError;
+ }
+
+#if defined(DEBUG_BRIDGE_KM)
+ eError = OSLockCreate(&g_hStatsLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to create bridge stats lock"));
+ return eError;
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+void BridgeDeinit(void)
+{
+#if defined(DEBUG_BRIDGE_KM)
+ if(g_hStatsLock)
+ {
+ OSLockDestroy(g_hStatsLock);
+ g_hStatsLock = NULL;
+ }
+#endif
+
+ _BridgeBufferPoolDestroy();
+}
+
+static PVR_POOL_BUFFER *_BridgePoolAcquireBuffer(void **ppvBridgeIn,
+ void **ppvBridgeOut)
+{
+ PVR_POOL_BUFFER *psPoolBuffer = NULL;
+ IMG_UINT i;
+
+ PVR_ASSERT(g_psBridgePool != NULL);
+ PVR_ASSERT(ppvBridgeIn != NULL && ppvBridgeOut != NULL);
+
+ OSLockAcquire(g_psBridgePool->hLock);
+
+ for (i = 0; i < PVR_BUFFER_POOL_MAX; i++)
+ {
+ PVR_POOL_BUFFER *psBuffer = &g_psBridgePool->asPool[i];
+
+ if (psBuffer->pvBuffer != NULL)
+ {
+ if (psBuffer->bTaken)
+ continue;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "_BridgePoolAcquireBuffer: "
+ "Reusing buffer %p.", psBuffer->pvBuffer));
+
+ psBuffer->bTaken = IMG_TRUE;
+ *ppvBridgeIn = psBuffer->pvBuffer;
+ *ppvBridgeOut = ((IMG_BYTE *) psBuffer->pvBuffer) +
+ PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+ psPoolBuffer = psBuffer;
+ goto return_;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "_BridgePoolAcquireBuffer: "
+ "Allocating new bridge buffer."));
+
+ psBuffer->pvBuffer = OSAllocZMemNoStats(PVRSRV_MAX_BRIDGE_IN_SIZE +
+ PVRSRV_MAX_BRIDGE_OUT_SIZE);
+ if (psBuffer->pvBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_BridgePoolAcquireBuffer: "
+ "Out of memory! Could not allocate new buffer."));
+ goto return_;
+ }
+
+ *ppvBridgeIn = psBuffer->pvBuffer;
+ *ppvBridgeOut = ((IMG_BYTE *) psBuffer->pvBuffer) +
+ PVRSRV_MAX_BRIDGE_IN_SIZE;
+ g_psBridgePool->uiCount++;
+
+ psPoolBuffer = psBuffer;
+ goto return_;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_BridgePoolAcquireBuffer: "
+ "Not enough buffers in the pool."));
+
+return_:
+ OSLockRelease(g_psBridgePool->hLock);
+
+ return psPoolBuffer;
+}
+
+static void _BridgePoolReleaseBuffers(PVR_POOL_BUFFER *psBuffer)
+{
+ PVR_ASSERT(g_psBridgePool != NULL);
+
+ if (psBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Called release on NULL buffer",
+ __FUNCTION__));
+ return;
+ }
+
+ OSLockAcquire(g_psBridgePool->hLock);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "_BridgePoolReleaseBuffers: "
+ "Releasing buffer %p.", psBuffer->pvBuffer));
+ psBuffer->bTaken = IMG_FALSE;
+
+ OSLockRelease(g_psBridgePool->hLock);
+}
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
+{
+
+ void * psBridgeIn=NULL;
+ void * psBridgeOut=NULL;
+ BridgeWrapperFunction pfBridgeHandler;
+ IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary;
+ PVRSRV_ERROR err = PVRSRV_OK;
+ PVR_POOL_BUFFER *psPoolBuffer = NULL;
+ IMG_UINT32 ui32Timestamp = OSClockus();
+#if defined(DEBUG_BRIDGE_KM)
+ IMG_UINT64 ui64TimeStart;
+ IMG_UINT64 ui64TimeEnd;
+ IMG_UINT64 ui64TimeDiff;
+#endif
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+ PVR_DBG_BREAK;
+#endif
+
+ if(BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT <= psBridgePackageKM->ui32BridgeID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d",
+ __FUNCTION__, psBridgePackageKM->ui32BridgeID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+ ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[psBridgePackageKM->ui32BridgeID][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+ ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[psBridgePackageKM->ui32BridgeID][PVR_DISPATCH_OFFSET_LAST_FUNC];
+
+ /* bridge function is not implemented in this build */
+ if(0 == ui32DispatchTableEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ /* this points to DummyBW() which returns PVRSRV_ERROR_BRIDGE_ENOTTY */
+ err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry,
+ psBridgeIn,
+ psBridgeOut,
+ psConnection);
+ goto return_error;
+ }
+ else
+ {
+ ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID;
+ }
+ if(ui32DispatchTableEntry > ui32GroupBoundary)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+ if(BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu,"
+ " (bridge module %d, function %d)", __FUNCTION__,
+ ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry=%d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntry, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+ __FUNCTION__,
+ g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CallCount++;
+ g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+
+ if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock == NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ /* Acquire default global bridge lock if calling module has no independent lock */
+ OSAcquireBridgeLock();
+
+ /* Request for global bridge buffers */
+ OSGetGlobalBridgeBuffers(&psBridgeIn,
+ &psBridgeOut);
+ }
+ else
+ {
+ if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock != NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+ }
+
+ psPoolBuffer = _BridgePoolAcquireBuffer(&psBridgeIn,
+ &psBridgeOut);
+ if (psPoolBuffer == NULL)
+ {
+ err = PVRSRV_ERROR_BRIDGE_ENOMEM;
+ goto unlock_and_return_error;
+ }
+ }
+
+#if defined(DEBUG_BRIDGE_KM)
+ ui64TimeStart = OSClockns64();
+#endif
+
+ if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small "
+ "(data size %u, buffer size %u)!", __FUNCTION__,
+ psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE));
+ err = PVRSRV_ERROR_BRIDGE_ERANGE;
+ goto unlock_and_return_error;
+ }
+
+#if !defined(INTEGRITY_OS)
+ if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small "
+ "(data size %u, buffer size %u)!", __FUNCTION__,
+ psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE));
+ err = PVRSRV_ERROR_BRIDGE_ERANGE;
+ goto unlock_and_return_error;
+ }
+
+ if((CopyFromUserWrapper (psConnection,
+ ui32DispatchTableEntry,
+ psBridgeIn,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined __QNXNTO__
+/* For Neutrino, the output bridge buffer acts as an input as well */
+ || (CopyFromUserWrapper(psConnection,
+ ui32DispatchTableEntry,
+ psBridgeOut,
+ (void *)((IMG_UINT32)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+ ) /* end of if-condition */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: CopyFromUserWrapper returned an error!", __FUNCTION__));
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+#else
+ psBridgeIn = psBridgePackageKM->pvParamIn;
+ psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+ pfBridgeHandler =
+ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction;
+
+ if (pfBridgeHandler == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+ __FUNCTION__, ui32DispatchTableEntry));
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+
+ /* pfBridgeHandler functions do not currently fail; they return an IMG_INT
+ * whose value is either 0 or PVRSRV_OK (also 0).
+ * In the event this changes, an error may be positive or negative,
+ * so try to return something consistent here.
+ */
+ if (0 != pfBridgeHandler(ui32DispatchTableEntry,
+ psBridgeIn,
+ psBridgeOut,
+ psConnection)
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: pfBridgeHandler returned an error", __FUNCTION__));
+ err = PVRSRV_ERROR_BRIDGE_EPERM;
+ goto unlock_and_return_error;
+ }
+
+ /*
+ This should always be true as, at the moment, all bridge calls have to
+ return an error message, but this could change, so we do this
+ check to be safe.
+ */
+ if (psBridgePackageKM->ui32OutBufferSize > 0)
+ {
+#if !defined(INTEGRITY_OS)
+ if (CopyToUserWrapper (psConnection,
+ ui32DispatchTableEntry,
+ psBridgePackageKM->pvParamOut,
+ psBridgeOut,
+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+ {
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+#endif
+ }
+
+#if defined(DEBUG_BRIDGE_KM)
+ ui64TimeEnd = OSClockns64();
+
+ ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
+
+ /* if there is no lock held then acquire the stats lock to
+ * ensure the calculations are done safely
+ */
+ if (!g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ OSLockAcquire(g_hStatsLock);
+ }
+
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui64TotalTimeNS += ui64TimeDiff;
+
+ if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntry].ui64MaxTimeNS)
+ {
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui64MaxTimeNS = ui64TimeDiff;
+ }
+
+ if (!g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ OSLockRelease(g_hStatsLock);
+ }
+#endif
+
+unlock_and_return_error:
+ if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock == NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ OSReleaseBridgeLock();
+ }
+ else
+ {
+ if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock != NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+ {
+ OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+ }
+
+ _BridgePoolReleaseBuffers(psPoolBuffer);
+ }
+
+return_error:
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __FUNCTION__, err));
+ }
+ /* ignore transport layer bridge to avoid HTB flooding */
+ if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL)
+ {
+ if (err)
+ {
+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID, err);
+ }
+ else
+ {
+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID);
+ }
+ }
+ return err;
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title PVR Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the PVR Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection);
+
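+/* Every bridge wrapper in the dispatch table shares this signature; DummyBW
+ * above matches it and serves as the placeholder handler for dispatch table
+ * entries that have no real wrapper registered. */
+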
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+ BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+ arguments before calling into srvkm proper */
+ POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired
+ before calling the above wrapper */
+ IMG_BOOL bUseLock; /*!< Specify whether to use a bridge lock at all */
+#if defined(DEBUG_BRIDGE_KM)
+ const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+ const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+ const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */
+ IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+ IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+ userspace within this ioctl */
+ IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied to
+ userspace within this ioctl */
+ IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */
+ IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */
+#endif
+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1)
+#endif
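+
+/* BRIDGE_DISPATCH_TABLE_ENTRY_COUNT sizes the dispatch table itself (one slot
+ * per bridge function), whereas BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT holds
+ * one entry per bridge group, for the per-group start offsets set up by
+ * BridgeDispatchTableStartOffsetsInit() below. */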
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+void BridgeDispatchTableStartOffsetsInit(void);
+
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName,
+ POS_LOCK hBridgeLock,
+ const IMG_CHAR* pszBridgeLockName,
+ IMG_BOOL bUseLock );
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+ hBridgeLock, bUseLock) \
+ _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+ (POS_LOCK)hBridgeLock, #hBridgeLock, bUseLock )
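+
+/* Illustrative use only - real registrations are emitted by the bridge
+ * generator and the group/index/wrapper names below are hypothetical:
+ *
+ * SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT,
+ * PVRSRVConnectBW, NULL, IMG_TRUE);
+ *
+ * Because the macro stringises its arguments, pszIOCName becomes
+ * "PVRSRV_BRIDGE_SRVCORE_CONNECT" and pszFunctionName "PVRSRVConnectBW" in
+ * the DEBUG_BRIDGE_KM fields of the entry. */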
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+ IMG_UINT32 ui32IOCTLCount;
+ IMG_UINT32 ui32TotalCopyFromUserBytes;
+ IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+/* OS specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (e.g. on Linux we report these via a
+ * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+PVRSRV_ERROR BridgeInit(void);
+void BridgeDeinit(void);
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
+
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32ClientBuildOptions,
+ IMG_UINT32 ui32ClientDDKVersion,
+ IMG_UINT32 ui32ClientDDKBuild,
+ IMG_UINT8 *pui8KernelArch,
+ IMG_UINT32 *ui32CapabilityFlags,
+ IMG_UINT32 *ui32PVRBridges,
+ IMG_UINT32 *ui32RGXBridges);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void);
+
+PVRSRV_ERROR
+PVRSRVInitSrvDisconnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bInitSuccesful,
+ IMG_UINT32 ui32ClientBuildOptions);
+
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32FWAlignChecksSize,
+ IMG_UINT32 aui32FWAlignChecks[]);
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32DeviceStatus);
+
+#endif /* __BRIDGED_PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (srvcore.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Initialisation server internal header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the connections between the various parts of the
+ initialisation server.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_H__
+#define __SRVINIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device_connection.h"
+
+PVRSRV_ERROR SrvInit(void);
+
+#if defined(SUPPORT_RGX)
+IMG_INTERNAL PVRSRV_ERROR RGXInit(SHARED_DEV_CONNECTION hServices);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __SRVINIT_H__ */
+
+/******************************************************************************
+ End of file (srvinit.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Initialisation server os-dependent functionality definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Glue header for os-dependent API calls
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_OSFUNC_H__
+#define __SRVINIT_OSFUNC_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/string.h> //strlen, strcpy
+#include <linux/ctype.h> //toupper
+#else
+#include <stddef.h>
+#include <ctype.h> //toupper
+#include <string.h> //strlen, strcpy
+#include <stdio.h>
+#endif
+
+#if (defined(__linux__) && defined(__KERNEL__)) || (defined(INTEGRITY_OS) && defined(SUPPORT_KERNEL_SRVINIT)) || defined(__QNXNTO__)
+#include "osfunc.h"
+static inline void SRVINITDeviceMemCopy(void *pvDst, const void *pvSrc, size_t uiSize)
+{
+ OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+static inline void SRVINITDeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t uiSize)
+{
+ OSDeviceMemSet(pvDest, ui8Value, uiSize);
+}
+#else
+#include "services.h"
+static inline void SRVINITDeviceMemCopy(void *pvDst, const void *pvSrc, size_t uiSize)
+{
+ PVRSRVDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+static inline void SRVINITDeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t uiSize)
+{
+ PVRSRVDeviceMemSet(pvDest, ui8Value, uiSize);
+}
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __SRVINIT_OSFUNC_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services initialisation PDump routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include "pdumpdefs.h"
+#include "client_pdump_bridge.h"
+#include "srvinit_pdump.h"
+
+IMG_INTERNAL void
+SRVINITPDumpComment(SHARED_DEV_CONNECTION hServices, const IMG_CHAR *pszFormat, ...)
+{
+ IMG_CHAR szScript[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ va_list argList;
+
+ va_start(argList, pszFormat);
+ vsnprintf(szScript, sizeof(szScript), pszFormat, argList);
+ va_end(argList);
+
+ (void) BridgePVRSRVPDumpComment(hServices,
+ szScript,
+ PDUMP_FLAGS_CONTINUOUS);
+}
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Initialisation server PDump related definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_PDUMP_H__
+#define __SRVINIT_PDUMP_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "device_connection.h"
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include "pdump_km.h"
+#else
+#include <stdio.h>
+#include <stdarg.h>
+#include "pdump_um.h"
+#endif
+
+#if defined(PDUMP)
+
+__printf(2, 3)
+IMG_INTERNAL void SRVINITPDumpComment(SHARED_DEV_CONNECTION hServices, const IMG_CHAR *pszFormat, ...);
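+
+/* Illustrative call (the format string and argument are hypothetical):
+ *
+ * SRVINITPDumpComment(hServices, "Initialising device %u", ui32DeviceIndex);
+ *
+ * When PDUMP is not defined, the static inline stub below turns such calls
+ * into no-ops. */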
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SRVINITPDumpComment)
+#endif
+static INLINE void
+SRVINITPDumpComment(SHARED_DEV_CONNECTION hServices, const IMG_CHAR *pszFormat, ...)
+{
+ PVR_UNREFERENCED_PARAMETER(hServices);
+ PVR_UNREFERENCED_PARAMETER(pszFormat);
+}
+
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __SRVINIT_PDUMP_H__ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Services kernel module internal header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*************************************************************************/ /*!
+@Function PVRSRVDriverInit
+@Description Performs one time initialisation of Services.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDriverInit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDriverDeInit
+@Description Performs one time de-initialisation of Services.
+@Return void
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVDriverDeInit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceCreate
+@Description Creates a PVR Services device node for an OS native device.
+@Input pvOSDevice OS native device
+@Output ppsDeviceNode Points to the new device node on success
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceCreate(void *pvOSDevice,
+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceInitialise
+@Description Initialises the given device, created by PVRSRVDeviceCreate,
+ so that it is in a functional state ready to be used.
+@Input psDeviceNode Device node of the device to be initialised
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceDestroy
+@Description Destroys a PVR Services device node.
+@Input psDeviceNode Device node to destroy
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+ {
+ bTimeout = IMG_FALSE;
+ break;
+ }
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/* iNotLastLoop will remain at 1 until the timeout has expired, at which point
+ * it will be decremented and the loop executed one final time. This is
+ * necessary when preemption is enabled.
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+ IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+ IMG_INT32 iNotLastLoop; \
+ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+ ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \
+ uiCurrent = OSClockus(), \
+ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
+ uiStart = uiCurrent < uiStart ? 0 : uiStart)
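+
+/* Note: if OSClockus() wraps (uiCurrent < uiStart), uiOffset captures the
+ * time that elapsed before the wrap and uiStart is reset to zero, so the
+ * elapsed-time test above remains valid across a single counter wrap. */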
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
+
+#endif /* SRVKM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services synchronisation interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements client side code for services synchronisation
+ interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "client_sync_bridge.h"
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#include "client_synctracking_bridge.h"
+#endif
+#include "pvr_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "log2.h"
+/* FIXME */
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+
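+/* Growth step, in entries, of a SYNC_BLOCK_LIST's papsSyncPrimBlock array */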
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10
+
+/*
+ This defines the maximum amount of synchronisation memory
+ that can be allocated per SyncPrim context.
+ In reality this number is meaningless as we would run out
+ of synchronisation memory before we reach this limit, but
+ we need to provide a size to the span RA.
+*/
+#define MAX_SYNC_MEM (4 * 1024 * 1024)
+
+typedef struct _SYNC_BLOCK_LIST_
+{
+ IMG_UINT32 ui32BlockCount; /*!< Number of blocks in the list */
+ IMG_UINT32 ui32BlockListSize; /*!< Size of the papsSyncPrimBlock array */
+ SYNC_PRIM_BLOCK **papsSyncPrimBlock; /*!< Array of syncprim blocks */
+} SYNC_BLOCK_LIST;
+
+typedef struct _SYNC_OP_COOKIE_
+{
+ IMG_UINT32 ui32SyncCount;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_BOOL bHaveServerSync;
+ IMG_HANDLE hBridge;
+ IMG_HANDLE hServerCookie;
+
+ SYNC_BLOCK_LIST *psSyncBlockList;
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim;
+ /*
+ Client sync(s) info.
+ If this changes update the calculation of ui32ClientAllocSize
+ */
+ IMG_UINT32 *paui32SyncBlockIndex;
+ IMG_UINT32 *paui32Index;
+ IMG_UINT32 *paui32Flags;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 *paui32UpdateValue;
+
+ /*
+ Server sync(s) info
+ If this changes update the calculation of ui32ServerAllocSize
+ */
+ IMG_HANDLE *pahServerSync;
+ IMG_UINT32 *paui32ServerFlags;
+} SYNC_OP_COOKIE;
+
+/* forward declaration */
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+ Internal interfaces for management of SYNC_PRIM_CONTEXT
+*/
+static void
+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextUnref context already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+ {
+ /* SyncPrimContextDestroy only when no longer referenced */
+ RA_Delete(psContext->psSpanRA);
+ RA_Delete(psContext->psSubAllocRA);
+ OSFreeMem(psContext);
+ }
+}
+
+static void
+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextRef context use after free"));
+ }
+ else
+ {
+ OSAtomicIncrement(&psContext->hRefCount);
+ }
+}
+
+/*
+ Internal interfaces for management of synchronisation block memory
+*/
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+ SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
+ SYNC_PRIM_BLOCK *psSyncBlk;
+ IMG_HANDLE hSyncPMR;
+ IMG_HANDLE hSyncImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ PVRSRV_ERROR eError;
+
+ psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+ if (psSyncBlk == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+ psSyncBlk->psContext = psContext;
+
+ /* Allocate sync prim block */
+ eError = BridgeAllocSyncPrimitiveBlock(psContext->hDevConnection,
+ &psSyncBlk->hServerSyncPrimBlock,
+ &psSyncBlk->ui32FirmwareAddr,
+ &psSyncBlk->ui32SyncBlockSize,
+ &hSyncPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_blockalloc;
+ }
+
+ /* Make it mappable by the client */
+ eError = DevmemMakeLocalImportHandle(psContext->hDevConnection,
+ hSyncPMR,
+ &hSyncImportHandle);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_export;
+ }
+
+ /* Get CPU mapping of the memory block */
+ eError = DevmemLocalImport(psContext->hDevConnection,
+ hSyncImportHandle,
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+ &psSyncBlk->hMemDesc,
+ &uiImportSize,
+ "SyncPrimitiveBlock");
+
+ /*
+ Regardless of success or failure we "undo" the export
+ */
+ DevmemUnmakeLocalImportHandle(psContext->hDevConnection,
+ hSyncImportHandle);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_import;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+ (void **) &psSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cpuvaddr;
+ }
+
+ *ppsSyncBlock = psSyncBlk;
+ return PVRSRV_OK;
+
+fail_cpuvaddr:
+ DevmemFree(psSyncBlk->hMemDesc);
+fail_import:
+fail_export:
+ BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+ psSyncBlk->hServerSyncPrimBlock);
+fail_blockalloc:
+ OSFreeMem(psSyncBlk);
+fail_alloc:
+ return eError;
+}
+
+static void
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+ SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+ DevmemFree(psSyncBlk->hMemDesc);
+ BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+ psSyncBlk->hServerSyncPrimBlock);
+ OSFreeMem(psSyncBlk);
+}
+
+static PVRSRV_ERROR
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ SYNC_PRIM_CONTEXT *psContext = hArena;
+ SYNC_PRIM_BLOCK *psSyncBlock = NULL;
+ RA_LENGTH_T uiSpanSize;
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uFlags);
+
+ /* Check we've not been called with an unexpected size */
+ if (!hArena || sizeof(IMG_UINT32) != uSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /*
+ Ensure the syncprim context doesn't go away while we have sync blocks
+ attached to it
+ */
+ _SyncPrimContextRef(psContext);
+
+ /* Allocate the block of memory */
+ eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate syncprim block (%d)", eError));
+ goto fail_syncblockalloc;
+ }
+
+ /* Allocate a span for it */
+ eError = RA_Alloc(psContext->psSpanRA,
+ psSyncBlock->ui32SyncBlockSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ psSyncBlock->ui32SyncBlockSize,
+ pszAnnotation,
+ &psSyncBlock->uiSpanBase,
+ &uiSpanSize,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_spanalloc;
+ }
+
+ /*
+ There is no reason the span RA should return an allocation larger
+ than we request
+ */
+ PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+ *puiBase = psSyncBlock->uiSpanBase;
+ *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+ *phImport = psSyncBlock;
+ return PVRSRV_OK;
+
+fail_spanalloc:
+ FreeSyncPrimitiveBlock(psSyncBlock);
+fail_syncblockalloc:
+ _SyncPrimContextUnref(psContext);
+e0:
+ return eError;
+}
+
+static void
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ SYNC_PRIM_CONTEXT *psContext = hArena;
+ SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+ if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+ return;
+ }
+
+ /* Free the span this import is using */
+ RA_Free(psContext->psSpanRA, uiBase);
+
+ /* Free the syncprim block */
+ FreeSyncPrimitiveBlock(psSyncBlock);
+
+ /* Drop our reference to the syncprim context */
+ _SyncPrimContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+ IMG_UINT64 ui64Temp;
+
+ PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+ /* FIXME: This subtracts one 64-bit address from another and then
+ * implicitly casts the result to a 32-bit number. All call sequences
+ * that use this function need reviewing; explicit casting has been
+ * added for now.
+ */
+ ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+ PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+ return (IMG_UINT32)ui64Temp;
+}
+
+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
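+ /* pui32LinAddr is an IMG_UINT32 pointer, so convert the byte offset
+ * within the block into an element index before adding it. */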
+ psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+ (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ {
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hConn =
+ psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (PVRSRVIsBridgeEnabled(hConn, PVRSRV_BRIDGE_SYNCTRACKING))
+ {
+ /* remove this sync record */
+ eError = BridgeSyncRecordRemoveByHandle(hConn,
+ psSyncInt->u.sLocal.hRecord);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to remove SyncRecord", __FUNCTION__));
+ }
+ }
+ else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+ {
+ IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr +
+ SyncPrimGetOffset(psSyncInt);
+
+ eError = BridgeSyncFreeEvent(hConn, ui32FWAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "BridgeSyncAllocEvent failed with error:"
+ " %d", eError));
+ }
+ }
+ }
+ /* Reset the sync prim value as it is freed.
+ * This guarantees that a sync subsequently handed out to a client will
+ * have a value of zero, so the client does not need to explicitly
+ * initialise the sync value to zero. The backing memory for the sync
+ * prim block is allocated with ZERO_ON_ALLOC, so the memory is
+ * initially all zero.
+ */
+ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+
+ RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+ OSFreeMem(psSyncInt);
+ _SyncPrimContextUnref(psContext);
+}
+
+static void SyncPrimServerFree(SYNC_PRIM *psSyncInt)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeServerSyncFree(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimServerFree failed"));
+ }
+ OSFreeMem(psSyncInt);
+}
+
+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+ if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+ {
+ SyncPrimLocalFree(psSyncInt);
+ }
+}
+
+static void SyncPrimLocalRef(SYNC_PRIM *psSyncInt)
+{
+ if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalRef sync use after free"));
+ }
+ else
+ {
+ OSAtomicIncrement(&psSyncInt->u.sLocal.hRefCount);
+ }
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrServer(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sServer.ui32FirmwareAddr;
+}
+
+#if !defined(__KERNEL__)
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleLocal(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleServer(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sServer.hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ return _SyncPrimGetBridgeHandleLocal(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ return _SyncPrimGetBridgeHandleServer(psSyncInt);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimGetBridgeHandle: Invalid sync type"));
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ return 0;
+ }
+}
+#endif
+
+/*
+ Internal interfaces for management of syncprim block lists
+*/
+static SYNC_BLOCK_LIST *_SyncPrimBlockListCreate(void)
+{
+ SYNC_BLOCK_LIST *psBlockList;
+
+ psBlockList = OSAllocMem(sizeof(SYNC_BLOCK_LIST));
+ if (!psBlockList)
+ {
+ return NULL;
+ }
+
+ psBlockList->ui32BlockCount = 0;
+ psBlockList->ui32BlockListSize = SYNC_BLOCK_LIST_CHUNCK_SIZE;
+
+ psBlockList->papsSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *)
+ * SYNC_BLOCK_LIST_CHUNCK_SIZE);
+ if (!psBlockList->papsSyncPrimBlock)
+ {
+ OSFreeMem(psBlockList);
+ return NULL;
+ }
+
+ OSCachedMemSet(psBlockList->papsSyncPrimBlock,
+ 0,
+ sizeof(SYNC_PRIM_BLOCK *) * psBlockList->ui32BlockListSize);
+
+ return psBlockList;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListAdd(SYNC_BLOCK_LIST *psBlockList,
+ SYNC_PRIM_BLOCK *psSyncPrimBlock)
+{
+ IMG_UINT32 i;
+
+ /* Check the context isn't already on the list */
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+ {
+ return PVRSRV_OK;
+ }
+ }
+
+ /* Check we have space for a new item */
+ if (psBlockList->ui32BlockCount == psBlockList->ui32BlockListSize)
+ {
+ SYNC_PRIM_BLOCK **papsNewSyncPrimBlock;
+
+ papsNewSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *) *
+ (psBlockList->ui32BlockListSize +
+ SYNC_BLOCK_LIST_CHUNCK_SIZE));
+ if (!papsNewSyncPrimBlock)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSCachedMemCopy(papsNewSyncPrimBlock,
+ psBlockList->papsSyncPrimBlock,
+ sizeof(SYNC_PRIM_BLOCK *) *
+ psBlockList->ui32BlockListSize);
+
+ OSFreeMem(psBlockList->papsSyncPrimBlock);
+
+ psBlockList->papsSyncPrimBlock = papsNewSyncPrimBlock;
+ psBlockList->ui32BlockListSize += SYNC_BLOCK_LIST_CHUNCK_SIZE;
+ }
+
+ /* Add the context to the list */
+ psBlockList->papsSyncPrimBlock[psBlockList->ui32BlockCount++] = psSyncPrimBlock;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListBlockToIndex(SYNC_BLOCK_LIST *psBlockList,
+ SYNC_PRIM_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 *pui32Index)
+{
+ IMG_UINT32 i;
+
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+ {
+ *pui32Index = i;
+ return PVRSRV_OK;
+ }
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListHandleArrayCreate(SYNC_BLOCK_LIST *psBlockList,
+ IMG_UINT32 *pui32BlockHandleCount,
+ IMG_HANDLE **ppahHandleList)
+{
+ IMG_HANDLE *pahHandleList;
+ IMG_UINT32 i;
+
+ pahHandleList = OSAllocMem(sizeof(IMG_HANDLE) *
+ psBlockList->ui32BlockCount);
+ if (!pahHandleList)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ pahHandleList[i] = psBlockList->papsSyncPrimBlock[i]->hServerSyncPrimBlock;
+ }
+
+ *ppahHandleList = pahHandleList;
+ *pui32BlockHandleCount = psBlockList->ui32BlockCount;
+
+ return PVRSRV_OK;
+}
+
+static void _SyncPrimBlockListHandleArrayDestroy(IMG_HANDLE *pahHandleList)
+{
+ OSFreeMem(pahHandleList);
+}
+
+static IMG_UINT32 _SyncPrimBlockListGetClientValue(SYNC_BLOCK_LIST *psBlockList,
+ IMG_UINT32 ui32BlockIndex,
+ IMG_UINT32 ui32Index)
+{
+ return psBlockList->papsSyncPrimBlock[ui32BlockIndex]->pui32LinAddr[ui32Index];
+}
+
+static void _SyncPrimBlockListDestroy(SYNC_BLOCK_LIST *psBlockList)
+{
+ OSFreeMem(psBlockList->papsSyncPrimBlock);
+ OSFreeMem(psBlockList);
+}
+
+
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+ PVR_ASSERT(IsPower2(ui32Align));
+ return ExactLog2(ui32Align);
+}
+
+/*
+ External interfaces
+*/
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+ PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+ if (psContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psContext->hDevConnection = hDevConnection;
+
+ OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+ OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+ /*
+ Create the RA for sub-allocations of the SyncPrims
+
+ Note:
+ The import size doesn't matter here as the server will pass
+ back the block size when it does the import, which overrides
+ what we specify here.
+ */
+
+ psContext->psSubAllocRA = RA_Create(psContext->azName,
+ /* Params for imports */
+ _Log2(sizeof(IMG_UINT32)),
+ RA_LOCKCLASS_2,
+ SyncPrimBlockImport,
+ SyncPrimBlockUnimport,
+ psContext,
+ IMG_FALSE);
+ if (psContext->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_suballoc;
+ }
+
+ /*
+ Create the span-management RA
+
+ The RA requires that we work with linear spans. For our use
+ here we don't require this behaviour as we're always working
+ within offsets of blocks (imports). However, we need to keep
+ the RA happy so we create the "span" management RA, which
+ ensures that all our imports are added to the RA in a linear
+ fashion.
+ */
+ psContext->psSpanRA = RA_Create(psContext->azSpanName,
+ /* Params for imports */
+ 0,
+ RA_LOCKCLASS_1,
+ NULL,
+ NULL,
+ NULL,
+ IMG_FALSE);
+ if (psContext->psSpanRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span;
+ }
+
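+ /* Seed the span RA with a single [0, MAX_SYNC_MEM) range; block imports
+ * in SyncPrimBlockImport() carve their spans out of this range. */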
+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL))
+ {
+ RA_Delete(psContext->psSpanRA);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span;
+ }
+
+ OSAtomicWrite(&psContext->hRefCount, 1);
+
+ *phSyncPrimContext = psContext;
+ return PVRSRV_OK;
+fail_span:
+ RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+ OSFreeMem(psContext);
+fail_alloc:
+ return eError;
+}
+
+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+ if (1 != OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __FUNCTION__));
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#if defined(__KERNEL__)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state.", __FUNCTION__));
+ OSAtomicWrite(&psContext->hRefCount, 1);
+ }
+#endif
+#endif
+ _SyncPrimContextUnref(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName,
+ IMG_BOOL bServerSync)
+{
+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM *psNewSync;
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiSpanAddr;
+
+ if (!hSyncPrimContext)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid context", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+ if (psNewSync == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = RA_Alloc(psContext->psSubAllocRA,
+ sizeof(IMG_UINT32),
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ sizeof(IMG_UINT32),
+ "Sync_Prim",
+ &uiSpanAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psSyncBlock);
+ if (PVRSRV_OK != eError)
+ {
+ goto fail_raalloc;
+ }
+ psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+ OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+ psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+ psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+ SyncPrimGetCPULinAddr(psNewSync);
+ *ppsSync = &psNewSync->sCommon;
+ _SyncPrimContextRef(psContext);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (PVRSRVIsBridgeEnabled(psSyncBlock->psContext->hDevConnection, PVRSRV_BRIDGE_SYNCTRACKING))
+ {
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+ if (pszClassName)
+ {
+ /* Copy the class name annotation into a fixed-size array */
+ OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+ szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+ }
+ else
+ {
+ /* No class name annotation */
+ szClassName[0] = 0;
+ }
+ /* record this sync */
+ eError = BridgeSyncRecordAdd(
+ psSyncBlock->psContext->hDevConnection,
+ &psNewSync->u.sLocal.hRecord,
+ psSyncBlock->hServerSyncPrimBlock,
+ psSyncBlock->ui32FirmwareAddr,
+ SyncPrimGetOffset(psNewSync),
+ bServerSync,
+ OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+ szClassName);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord", __FUNCTION__));
+ }
+ }
+ else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+ {
+ eError = BridgeSyncAllocEvent(hSyncPrimContext->hDevConnection,
+ bServerSync,
+ psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync),
+ OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN),
+ pszClassName);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "BridgeSyncAllocEvent failed with error: %d",
+ eError));
+ }
+ }
+
+ return PVRSRV_OK;
+
+fail_raalloc:
+ OSFreeMem(psNewSync);
+fail_alloc:
+ return eError;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName)
+{
+ return _SyncPrimAlloc(hSyncPrimContext,
+ ppsSync,
+ pszClassName,
+ IMG_TRUE);
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName)
+{
+ return _SyncPrimAlloc(hSyncPrimContext,
+ ppsSync,
+ pszClassName,
+ IMG_FALSE);
+}
+
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimSet(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+ ui32Value);
+ }
+ else
+ {
+ eError = BridgeServerSyncPrimSet(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync,
+ ui32Value);
+ }
+ /* These functions don't actually fail */
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ SyncPrimLocalUnref(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ SyncPrimServerFree(psSyncInt);
+ }
+ else
+ {
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+err_out:
+ return eError;
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ /* There is no check that psSyncInt is LOCAL because this call stands in
+ for the firmware updating a sync, and that sync could be a server
+ sync */
+
+ eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+err_out:
+ return eError;
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimSet: Invalid sync type"));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+ eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+ SyncPrimPDump(psSync);
+#endif
+err_out:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_HANDLE *phBlock,
+ IMG_UINT32 *pui32Offset)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync || !phBlock || !pui32Offset)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
+ *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+err_out:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ *pui32FwAddr = 0;
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ *pui32FwAddr = SyncPrimGetFirmwareAddrServer(psSyncInt);
+ }
+ else
+ {
+ /* Either the client has given us a bad pointer or there is an
+ * error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+err_out:
+ return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF)
+ SYNC_PRIM *psSyncInt;
+ PVRSRV_CLIENT_SYNC_PRIM **papsServerSync;
+ IMG_UINT32 ui32ServerSyncs = 0;
+ IMG_UINT32 *pui32UID = NULL;
+ IMG_UINT32 *pui32FWAddr = NULL;
+ IMG_UINT32 *pui32CurrentOp = NULL;
+ IMG_UINT32 *pui32NextOp = NULL;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ papsServerSync = OSAllocMem(ui32SyncCount * sizeof(PVRSRV_CLIENT_SYNC_PRIM *));
+ if (!papsServerSync)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (i = 0; i < ui32SyncCount; i++)
+ {
+ psSyncInt = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync=local fw=0x%x curr=0x%04x",
+ pcszExtraInfo,
+ SyncPrimGetFirmwareAddrLocal(psSyncInt),
+ *psSyncInt->sCommon.pui32LinAddr));
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ papsServerSync[ui32ServerSyncs++] = papsSync[i];
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Invalid sync type"));
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_free;
+ }
+ }
+
+ if (ui32ServerSyncs > 0)
+ {
+ pui32UID = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32UID)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32FWAddr = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32FWAddr)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32CurrentOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32CurrentOp)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32NextOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32NextOp)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ eError = SyncPrimServerGetStatus(ui32ServerSyncs, papsServerSync,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Error querying server sync status (%d)",
+ eError));
+ goto err_free;
+ }
+ for (i = 0; i < ui32ServerSyncs; i++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync=server fw=0x%x curr=0x%04x next=0x%04x id=%u%s",
+ pcszExtraInfo,
+ pui32FWAddr[i],
+ pui32CurrentOp[i],
+ pui32NextOp[i],
+ pui32UID[i],
+ (pui32NextOp[i] - pui32CurrentOp[i] == 1) ? " *" :
+ (pui32NextOp[i] - pui32CurrentOp[i] > 1) ? " **" :
+ ""));
+ }
+ }
+
+err_free:
+ OSFreeMem(papsServerSync);
+ if (pui32UID)
+ {
+ OSFreeMem(pui32UID);
+ }
+ if (pui32FWAddr)
+ {
+ OSFreeMem(pui32FWAddr);
+ }
+ if (pui32CurrentOp)
+ {
+ OSFreeMem(pui32CurrentOp);
+ }
+ if (pui32NextOp)
+ {
+ OSFreeMem(pui32NextOp);
+ }
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32SyncCount);
+ PVR_UNREFERENCED_PARAMETER(papsSync);
+ PVR_UNREFERENCED_PARAMETER(pcszExtraInfo);
+ return PVRSRV_OK;
+#endif
+}
+#endif
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+ PSYNC_OP_COOKIE *ppsCookie)
+{
+ SYNC_OP_COOKIE *psNewCookie;
+ SYNC_BLOCK_LIST *psSyncBlockList;
+ IMG_UINT32 ui32ServerSyncCount = 0;
+ IMG_UINT32 ui32ClientSyncCount = 0;
+ IMG_UINT32 ui32ServerAllocSize;
+ IMG_UINT32 ui32ClientAllocSize;
+ IMG_UINT32 ui32TotalAllocSize;
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SyncBlockCount;
+ IMG_HANDLE hBridge;
+ IMG_HANDLE *pahHandleList;
+ IMG_CHAR *pcPtr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bServerSync;
+
+ psSyncBlockList = _SyncPrimBlockListCreate();
+
+ if (!psSyncBlockList)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (bServerSync)
+ {
+ ui32ServerSyncCount++;
+ }
+ else
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+ ui32ClientSyncCount++;
+ eError = _SyncPrimBlockListAdd(psSyncBlockList, psSync->u.sLocal.psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+ }
+
+ ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(IMG_HANDLE) + sizeof(IMG_UINT32));
+ ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+ ui32TotalAllocSize = sizeof(SYNC_OP_COOKIE) +
+ (sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount) +
+ ui32ServerAllocSize +
+ ui32ClientAllocSize;
+
+ psNewCookie = OSAllocMem(ui32TotalAllocSize);
+ if (!psNewCookie)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+ pcPtr = (IMG_CHAR *) psNewCookie;
+
+ /* Setup the pointers */
+ pcPtr += sizeof(SYNC_OP_COOKIE);
+ psNewCookie->papsSyncPrim = (PVRSRV_CLIENT_SYNC_PRIM **) pcPtr;
+
+ pcPtr += sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount;
+ psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->pahServerSync = (IMG_HANDLE *) pcPtr;
+ pcPtr += sizeof(IMG_HANDLE) * ui32ServerSyncCount;
+
+ psNewCookie->paui32ServerFlags = (IMG_UINT32 *) pcPtr;
+ pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+ /* Check the pointer setup went ok */
+ if (!(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: cookie setup failed", __FUNCTION__));
+ eError = PVRSRV_ERROR_INTERNAL_ERROR;
+ goto e2;
+ }
+
+ psNewCookie->ui32SyncCount = ui32SyncCount;
+ psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+ psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+ psNewCookie->psSyncBlockList = psSyncBlockList;
+
+ /*
+ Get the bridge handle from the 1st sync.
+
+ Note: We assume that all syncs have been created with the same
+ services connection.
+ */
+ eError = SyncPrimIsServerSync(papsSyncPrim[0], &bServerSync);
+ if (PVRSRV_OK != eError) goto e2;
+ if (bServerSync)
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+ hBridge = psSync->u.sServer.hBridge;
+ }
+ else
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+ hBridge = psSync->u.sLocal.psSyncBlock->psContext->hDevConnection;
+ }
+
+ psNewCookie->hBridge = hBridge;
+
+ if (ui32ServerSyncCount)
+ {
+ psNewCookie->bHaveServerSync = IMG_TRUE;
+ }
+ else
+ {
+ psNewCookie->bHaveServerSync = IMG_FALSE;
+ }
+
+ /* Fill in the server and client sync data */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+ eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e2;
+ if (bServerSync)
+ {
+ psNewCookie->pahServerSync[ui32ServerIndex] = psSync->u.sServer.hServerSync;
+
+ ui32ServerIndex++;
+ }
+ else
+ {
+ /* Location of sync */
+ eError = _SyncPrimBlockListBlockToIndex(psSyncBlockList,
+ psSync->u.sLocal.psSyncBlock,
+ &psNewCookie->paui32SyncBlockIndex[ui32ClientIndex]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /* Work out the index of the sync */
+ psNewCookie->paui32Index[ui32ClientIndex] =
+ SyncPrimGetOffset(psSync)/sizeof(IMG_UINT32);
+
+ ui32ClientIndex++;
+ }
+
+ psNewCookie->papsSyncPrim[i] = papsSyncPrim[i];
+ }
+
+ eError = _SyncPrimBlockListHandleArrayCreate(psSyncBlockList,
+ &ui32SyncBlockCount,
+ &pahHandleList);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /*
+ Create the server side cookie. Here we pass in all the unchanging
+ data so that only the minimum needs to be passed at SyncPrimOpTake time
+ */
+ eError = BridgeSyncPrimOpCreate(hBridge,
+ ui32SyncBlockCount,
+ pahHandleList,
+ psNewCookie->ui32ClientSyncCount,
+ psNewCookie->paui32SyncBlockIndex,
+ psNewCookie->paui32Index,
+ psNewCookie->ui32ServerSyncCount,
+ psNewCookie->pahServerSync,
+ &psNewCookie->hServerCookie);
+
+ /* Free the handle list regardless of error */
+ _SyncPrimBlockListHandleArrayDestroy(pahHandleList);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /* Increase the reference count on all referenced local sync prims
+ * so that they cannot be freed until this Op is finished with
+ */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSyncInt;
+ psSyncInt = IMG_CONTAINER_OF(papsSyncPrim[i], SYNC_PRIM, sCommon);
+ if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+ {
+ SyncPrimLocalRef(psSyncInt);
+ }
+ }
+
+ *ppsCookie = psNewCookie;
+ return PVRSRV_OK;
+
+e2:
+ OSFreeMem(psNewCookie);
+e1:
+ _SyncPrimBlockListDestroy(psSyncBlockList);
+e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ IMG_UINT32 i;
+ IMG_BOOL bServerSync;
+
+ /* Copy client sync operations */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ /*
+ Sanity check the client passes in the same syncs as the
+ ones we got at create time
+ */
+ if (psCookie->papsSyncPrim[i] != pasSyncOp[i].psSync)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = SyncPrimIsServerSync(pasSyncOp[i].psSync, &bServerSync);
+ if (PVRSRV_OK != eError) goto e0;
+ if (bServerSync)
+ {
+ psCookie->paui32ServerFlags[ui32ServerIndex] =
+ pasSyncOp[i].ui32Flags;
+
+ ui32ServerIndex++;
+ }
+ else
+ {
+ /* Client operation information */
+ psCookie->paui32Flags[ui32ClientIndex] =
+ pasSyncOp[i].ui32Flags;
+ psCookie->paui32FenceValue[ui32ClientIndex] =
+ pasSyncOp[i].ui32FenceValue;
+ psCookie->paui32UpdateValue[ui32ClientIndex] =
+ pasSyncOp[i].ui32UpdateValue;
+
+ ui32ClientIndex++;
+ }
+ }
+
+ eError = BridgeSyncPrimOpTake(psCookie->hBridge,
+ psCookie->hServerCookie,
+ psCookie->ui32ClientSyncCount,
+ psCookie->paui32Flags,
+ psCookie->paui32FenceValue,
+ psCookie->paui32UpdateValue,
+ psCookie->ui32ServerSyncCount,
+ psCookie->paui32ServerFlags);
+
+e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+ IMG_BOOL *pbReady)
+{
+ PVRSRV_ERROR eError;
+ if (!psCookie)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /*
+ If we have a server sync we have no choice
+ but to do the check in the server
+ */
+ if (psCookie->bHaveServerSync)
+ {
+ eError = BridgeSyncPrimOpReady(psCookie->hBridge,
+ psCookie->hServerCookie,
+ pbReady);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to do sync check in server (Error = %d)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+ }
+ else
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SnapShot;
+ IMG_BOOL bReady = IMG_TRUE;
+
+ for (i=0;i<psCookie->ui32ClientSyncCount;i++)
+ {
+ if ((psCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+ {
+ continue;
+ }
+
+ ui32SnapShot = _SyncPrimBlockListGetClientValue(psCookie->psSyncBlockList,
+ psCookie->paui32SyncBlockIndex[i],
+ psCookie->paui32Index[i]);
+ if (ui32SnapShot != psCookie->paui32FenceValue[i])
+ {
+ bReady = IMG_FALSE;
+ break;
+ }
+ }
+
+ *pbReady = bReady;
+ }
+
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeSyncPrimOpComplete(psCookie->hBridge,
+ psCookie->hServerCookie);
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 i;
+
+ eError = BridgeSyncPrimOpDestroy(psCookie->hBridge, psCookie->hServerCookie);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to destroy SyncPrimOp (Error = %d)",
+ __FUNCTION__, eError));
+ goto err_out;
+ }
+
+ /* Decrease the reference count on all referenced local sync prims
+ * so that they can be freed now this Op is finished with
+ */
+ for (i=0;i<psCookie->ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSyncInt;
+ psSyncInt = IMG_CONTAINER_OF(psCookie->papsSyncPrim[i], SYNC_PRIM, sCommon);
+ if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+ {
+ SyncPrimLocalUnref(psSyncInt);
+ }
+ }
+
+ _SyncPrimBlockListDestroy(psCookie->psSyncBlockList);
+ OSFreeMem(psCookie);
+
+err_out:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 *pui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp)
+{
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOps;
+ IMG_UINT32 i;
+ IMG_BOOL bServerSync;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ psSyncOps = OSAllocMem(sizeof(PVRSRV_CLIENT_SYNC_PRIM_OP) *
+ psCookie->ui32SyncCount);
+ if (!psSyncOps)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ for (i=0; i<psCookie->ui32SyncCount; i++)
+ {
+ psSyncOps[i].psSync = psCookie->papsSyncPrim[i];
+ eError = SyncPrimIsServerSync(psCookie->papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (bServerSync)
+ {
+ psSyncOps[i].ui32FenceValue = 0;
+ psSyncOps[i].ui32UpdateValue = 0;
+ psSyncOps[i].ui32Flags = psCookie->paui32ServerFlags[ui32ServerIndex];
+ ui32ServerIndex++;
+ }
+ else
+ {
+ psSyncOps[i].ui32FenceValue = psCookie->paui32FenceValue[ui32ClientIndex];
+ psSyncOps[i].ui32UpdateValue = psCookie->paui32UpdateValue[ui32ClientIndex];
+ psSyncOps[i].ui32Flags = psCookie->paui32Flags[ui32ClientIndex];
+ ui32ClientIndex++;
+ }
+ }
+
+ *ppsSyncOp = psSyncOps;
+ *pui32SyncCount = psCookie->ui32SyncCount;
+
+ return PVRSRV_OK;
+
+ /* Only reached on error: free the partially populated array */
+e1:
+ OSFreeMem(psSyncOps);
+e0:
+ return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName
+ PVR_DBG_FILELINE_PARAM)
+{
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+ SYNC_PRIM *psNewSync;
+ PVRSRV_ERROR eError;
+
+#if !defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+ PVR_DBG_FILELINE_UNREF();
+#endif
+ psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+ if (psNewSync == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ OSCachedMemSet(psNewSync, 0, sizeof(SYNC_PRIM));
+
+ if(pszClassName)
+ {
+ /* Copy the class name annotation into a fixed-size array */
+ OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+ szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+ }
+ else
+ {
+ /* No class name annotation */
+ szClassName[0] = 0;
+ }
+
+ eError = BridgeServerSyncAlloc(hBridge,
+ &psNewSync->u.sServer.hServerSync,
+ &psNewSync->u.sServer.ui32FirmwareAddr,
+ OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+ szClassName);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+ PVR_DPF((PVR_DBG_WARNING, "Allocated sync=server fw=0x%x [%p]" PVR_DBG_FILELINE_FMT,
+ psNewSync->u.sServer.ui32FirmwareAddr, &psNewSync->sCommon PVR_DBG_FILELINE_ARG));
+#endif
+
+ psNewSync->eType = SYNC_PRIM_TYPE_SERVER;
+ psNewSync->u.sServer.hBridge = hBridge;
+ *ppsSync = &psNewSync->sCommon;
+
+ return PVRSRV_OK;
+e1:
+ OSFreeMem(psNewSync);
+e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ SYNC_BRIDGE_HANDLE hBridge = NULL;
+ IMG_HANDLE *pahServerHandle;
+ IMG_BOOL bServerSync;
+
+ if (papsSync[0])
+ {
+ hBridge = _SyncPrimGetBridgeHandle(papsSync[0]);
+ }
+ if (!hBridge)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid Sync connection", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+ pahServerHandle = OSAllocMem(sizeof(IMG_HANDLE) * ui32SyncCount);
+ if (pahServerHandle == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /*
+ Check that all the sync we've been passed are server syncs
+ and that they all are on the same connection.
+ */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psIntSync = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+
+ eError = SyncPrimIsServerSync(papsSync[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (!bServerSync)
+ {
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e1;
+ }
+
+ if (!papsSync[i] || hBridge != _SyncPrimGetBridgeHandle(papsSync[i]))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Sync connection is different", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e1;
+ }
+
+ pahServerHandle[i] = psIntSync->u.sServer.hServerSync;
+ }
+
+ eError = BridgeServerSyncGetStatus(hBridge,
+ ui32SyncCount,
+ pahServerHandle,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+ OSFreeMem(pahServerHandle);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ return PVRSRV_OK;
+
+e1:
+ OSFreeMem(pahServerHandle);
+e0:
+ return eError;
+}
+
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *pbServerSync = IMG_FALSE;
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ *pbServerSync = IMG_TRUE;
+ }
+ else
+ {
+ /* Either the client has given us a bad pointer or there is an
+ * error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+e0:
+ return eError;
+}
+
+IMG_INTERNAL
+IMG_HANDLE SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ goto e0;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ return psSyncInt->u.sServer.hServerSync;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ goto e0;
+ }
+e0:
+ return 0;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp)
+{
+ SYNC_PRIM *psSyncInt;
+ IMG_BOOL bUpdate;
+ PVRSRV_ERROR eError;
+
+ if (!psSyncOp)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSyncOp->psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_SERVER)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+ if (0 == psSyncOp->ui32Flags)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: no sync flags", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+ if (psSyncOp->ui32Flags & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+ {
+ bUpdate = IMG_TRUE;
+ }
+ else
+ {
+ bUpdate = IMG_FALSE;
+ }
+
+ eError = BridgeServerSyncQueueHWOp(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync,
+ bUpdate,
+ &psSyncOp->ui32FenceValue,
+ &psSyncOp->ui32UpdateValue);
+e0:
+ return eError;
+}
+
+#if defined(PDUMP)
+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDump(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt));
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpValue: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDumpValue(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ ui32Value);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpPol: Invalid sync type (expected SYNC_PRIM_TYPE_LOCAL)"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDumpPol(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psCookie != NULL);
+
+ eError = BridgeSyncPrimOpPDumpPol(psCookie->hBridge,
+ psCookie->hServerCookie,
+ eOperator,
+ ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpCBP: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ /* FIXME: uiWriteOffset, uiPacketSize, uiBufferSize were changed to
+ * 64-bit quantities to resolve Windows compiler warnings.
+ * However the bridge is only 32-bit hence compiler warnings
+ * of implicit cast and loss of data.
+ * Added explicit cast and assert to remove warning.
+ */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+ PVR_ASSERT(uiWriteOffset<IMG_UINT32_MAX);
+ PVR_ASSERT(uiPacketSize<IMG_UINT32_MAX);
+ PVR_ASSERT(uiBufferSize<IMG_UINT32_MAX);
+#endif
+ eError = BridgeSyncPrimPDumpCBP(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ (IMG_UINT32)uiWriteOffset,
+ (IMG_UINT32)uiPacketSize,
+ (IMG_UINT32)uiBufferSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Synchronisation interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the client side interface for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_
+#define _SYNC_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include <powervr/sync_external.h>
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function SyncPrimContextCreate
+
+@Description Create a new synchronisation context
+
+@Input hDevConnection Device connection handle
+
+@Output hSyncPrimContext Handle to the created synchronisation
+ primitive context
+
+@Return PVRSRV_OK if the synchronisation primitive context was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+ PSYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function SyncPrimContextDestroy
+
+@Description Destroy a synchronisation context
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context to destroy
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function SyncPrimAlloc
+
+@Description Allocate a new synchronisation primitive on the specified
+ synchronisation context
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context
+
+@Output ppsSync Created synchronisation primitive
+
+@Input pszClassName Sync source annotation
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName);
+
+#if defined(__KERNEL__)
+/*************************************************************************/ /*!
+@Function SyncPrimAllocForServerSync
+
+@Description Allocate a new synchronisation primitive on the specified
+ synchronisation context for a server sync
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context
+
+@Output ppsSync Created synchronisation primitive
+
+@Input pszClassName Sync source annotation
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName);
+#endif
+
+/*************************************************************************/ /*!
+@Function SyncPrimFree
+
+@Description Free a synchronisation primitive
+
+@Input psSync The synchronisation primitive to free
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully freed
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function SyncPrimSet
+
+@Description Set the synchronisation primitive to a value
+
+@Input psSync The synchronisation primitive to set
+
+@Input ui32Value Value to set it to
+
+@Return PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
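+
+/*
+ A minimal usage sketch (illustrative only, not part of the interface):
+ a client creates a context on an existing device connection, allocates a
+ sync prim, sets its value and tears everything down again. Error handling
+ is omitted and hDevConnection is assumed to have been obtained elsewhere.
+
+   PSYNC_PRIM_CONTEXT hSyncPrimContext;
+   PVRSRV_CLIENT_SYNC_PRIM *psSync;
+
+   SyncPrimContextCreate(hDevConnection, &hSyncPrimContext);
+   SyncPrimAlloc(hSyncPrimContext, &psSync, "example sync");
+   SyncPrimSet(psSync, 0);
+   ...
+   SyncPrimFree(psSync);
+   SyncPrimContextDestroy(hSyncPrimContext);
+*/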
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function SyncPrimNoHwUpdate
+
+@Description Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input psSync The synchronisation primitive to update
+
+@Input ui32Value Value to update it to
+
+@Return PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+PVRSRV_ERROR
+SyncPrimServerAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName
+ PVR_DBG_FILELINE_PARAM);
+
+PVRSRV_ERROR
+SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp);
+
+PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync);
+
+IMG_HANDLE
+SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync);
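+
+/*
+ Illustrative sketch only: allocating a server sync and queueing a hardware
+ operation on it. Error handling is omitted, hDevConnection is assumed to be
+ a valid connection, and PVR_DBG_FILELINE is assumed to be the call-site
+ counterpart of PVR_DBG_FILELINE_PARAM provided by pvr_debug.h.
+
+   PVRSRV_CLIENT_SYNC_PRIM *psServerSync;
+   PVRSRV_CLIENT_SYNC_PRIM_OP sOp = {0};
+
+   SyncPrimServerAlloc(hDevConnection, &psServerSync,
+                       "example server sync" PVR_DBG_FILELINE);
+   sOp.psSync = psServerSync;
+   sOp.ui32Flags = PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+   SyncPrimServerQueueOp(&sOp);
+*/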
+
+
+
+PVRSRV_ERROR
+SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+ PSYNC_OP_COOKIE *ppsCookie);
+
+PVRSRV_ERROR
+SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp);
+
+PVRSRV_ERROR
+SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+ IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie);
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie);
+
+PVRSRV_ERROR
+SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 *pui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp);
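+
+/*
+ Rough sketch of the operation cookie lifecycle (illustrative only): the
+ caller is assumed to have populated papsSyncPrim and pasSyncOp with
+ ui32SyncCount entries; error handling is omitted and a real caller would
+ wait rather than busy-poll on SyncPrimOpReady().
+
+   PSYNC_OP_COOKIE psCookie;
+   IMG_BOOL bReady = IMG_FALSE;
+
+   SyncPrimOpCreate(ui32SyncCount, papsSyncPrim, &psCookie);
+   SyncPrimOpTake(psCookie, ui32SyncCount, pasSyncOp);
+   while (!bReady)
+   {
+       SyncPrimOpReady(psCookie, &bReady);
+   }
+   SyncPrimOpComplete(psCookie);
+   SyncPrimOpDestroy(psCookie);
+*/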
+
+PVRSRV_ERROR
+SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function SyncPrimPDump
+
+@Description PDump the current value of the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpValue
+
+@Description PDump the ui32Value as the value of the synchronisation
+ primitive (regardless of the current value).
+
+@Input psSync The synchronisation primitive to PDump
+@Input ui32Value Value to give to the sync prim on the pdump
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpPol
+
+@Description Do a PDump poll of the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Input ui32Value Value to poll for
+
+@Input ui32Mask Mask to apply to the sync value before comparison
+
+@Input eOperator PDump poll comparison operator
+
+@Input ui32PDumpFlags PDump flags
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function SyncPrimOpPDumpPol
+
+@Description Do a PDump poll of all the synchronisation primitives on
+ this operation cookie.
+
+@Input psCookie Operation cookie
+
+@Input eOperator PDump poll comparison operator
+
+@Input ui32PDumpFlags PDump flags
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpCBP
+
+@Description Do a PDump CB poll using the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Input uiWriteOffset Current write offset of buffer
+
+@Input uiPacketSize Size of the packet to write into CB
+
+@Input uiBufferSize Size of the CB
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimServerPDumpPol)
+#endif
+static INLINE void
+SyncPrimServerPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif /* PDUMP */
+#endif /* _SYNC_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services synchronisation checkpoint interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements server side code for services synchronisation
+ interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#include "lock.h"
+#include "log2.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+
+#include "pvrsrv_sync_km.h"
+
+#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10
+#define LOCAL_SYNC_CHECKPOINT_RESET_VALUE PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED
+
+/*
+ This defines the maximum amount of synchronisation memory
+ that can be allocated per sync checkpoint context.
+ In reality this number is meaningless as we would run out
+ of synchronisation memory before we reach this limit, but
+ we need to provide a size to the span RA.
+*/
+#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024)
+
+/* Definitions for functions to be implemented by OS-specific sync code.
+ The OS-specific sync code calls SyncCheckpointRegisterFunctions() when it
+ initialises, in order to register the functions we can then call. */
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PVRSRV_FENCE_KM fence, IMG_UINT32 *nr_checkpoints, PSYNC_CHECKPOINT *checkpoint_handles);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name, PVRSRV_TIMELINE_KM timeline, PVRSRV_FENCE_KM *new_fence, PSYNC_CHECKPOINT *new_checkpoint_handle);
+
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
+{
+ IMG_UINT32 ui32BlockCount; /*!< Number of blocks in the list */
+ IMG_UINT32 ui32BlockListSize; /*!< Size of the block array */
+ SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
+} SYNC_CHECKPOINT_BLOCK_LIST;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
+
+struct SYNC_CHECKPOINT_RECORD
+{
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */
+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */
+ IMG_UINT32 ui32FwBlockAddr;
+ IMG_PID uiPID;
+ IMG_UINT64 ui64OSTime;
+ DLLIST_NODE sNode;
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN g_pfnFenceResolve = NULL;
+static PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN g_pfnFenceCreate = NULL;
+
+PVRSRV_ERROR
+SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord,
+ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+PVRSRV_ERROR
+SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord);
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(_SYNC_CHECKPOINT_CONTEXT *psContext);
+static void _SyncCheckpointRecordListDeinit(_SYNC_CHECKPOINT_CONTEXT *psContext);
+
+PVRSRV_ERROR SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+PVRSRV_ERROR SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+
+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate);
+
+/* Unique incremental ID assigned to sync checkpoints when allocated */
+static IMG_UINT32 g_SyncCheckpointUID = 0;
+
+/*
+ Internal interfaces for management of _SYNC_CHECKPOINT_CONTEXT
+*/
+static void
+_SyncCheckpointContextUnref(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, "_SyncCheckpointContextUnref context already freed");
+ }
+ else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+ {
+ /* SyncCheckpointContextDestroy only when no longer referenced */
+ _SyncCheckpointRecordListDeinit(psContext);
+ PVRSRVUnregisterDbgRequestNotify(psContext->hCheckpointNotify);
+ OSLockDestroy(psContext->hCheckpointListLock);
+ RA_Delete(psContext->psSpanRA);
+ RA_Delete(psContext->psSubAllocRA);
+ OSFreeMem(psContext);
+ }
+}
+
+static void
+_SyncCheckpointContextRef(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, "_SyncCheckpointContextRef context use after free");
+ }
+ else
+ {
+ OSAtomicIncrement(&psContext->hRefCount);
+ }
+}
+
+/*
+ Internal interfaces for management of synchronisation block memory
+*/
+static PVRSRV_ERROR
+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
+ SYNC_CHECKPOINT_BLOCK **ppsSyncBlock)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlk;
+ PVRSRV_ERROR eError;
+
+ psSyncBlk = OSAllocMem(sizeof(SYNC_CHECKPOINT_BLOCK));
+ if (psSyncBlk == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_LOG_ERROR(eError, "OSAllocMem");
+ goto fail_alloc;
+ }
+ psSyncBlk->psContext = psContext;
+
+ /* Allocate sync checkpoint block */
+ psDevNode = psContext->psDevNode;
+ if (!psDevNode)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOG_ERROR(eError, "context device node invalid");
+ goto fail_alloc_ufo_block;
+ }
+ psSyncBlk->psDevNode = psDevNode;
+
+ eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+ &psSyncBlk->hMemDesc,
+ &psSyncBlk->ui32FirmwareAddr,
+ &psSyncBlk->ui32SyncBlockSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "failed to allocate ufo block");
+ goto fail_alloc_ufo_block;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+ (void **) &psSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
+ goto fail_devmem_acquire;
+ }
+
+ OSAtomicWrite(&psSyncBlk->hRefCount, 1);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
+ psSyncBlk->ui32FirmwareAddr);
+
+ *ppsSyncBlock = psSyncBlk;
+ return PVRSRV_OK;
+
+fail_devmem_acquire:
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+fail_alloc_ufo_block:
+ OSFreeMem(psSyncBlk);
+fail_alloc:
+ return eError;
+}
+
+static void
+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
+{
+ if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+ OSFreeMem(psSyncBlk);
+ }
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
+ RA_LENGTH_T uiSpanSize;
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uFlags);
+
+ PVR_LOG_IF_FALSE((hArena != NULL), "hArena is NULL");
+
+ /* Check we've not been called with an unexpected size */
+ PVR_LOG_IF_FALSE((uSize == sizeof(_SYNC_CHECKPOINT_FW_OBJ)), "uSize is not the size of _SYNC_CHECKPOINT_FW_OBJ");
+
+ /*
+ Ensure the sync checkpoint context doesn't go away while we have sync blocks
+ attached to it
+ */
+ _SyncCheckpointContextRef(psContext);
+
+ /* Allocate the block of memory */
+ eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_syncblockalloc;
+ }
+
+ /* Allocate a span for it */
+ eError = RA_Alloc(psContext->psSpanRA,
+ psSyncBlock->ui32SyncBlockSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ psSyncBlock->ui32SyncBlockSize,
+ pszAnnotation,
+ &psSyncBlock->uiSpanBase,
+ &uiSpanSize,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_spanalloc;
+ }
+
+ /*
+ There is no reason the span RA should return an allocation larger
+ than we request
+ */
+ PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize), "uiSpanSize invalid");
+
+ *puiBase = psSyncBlock->uiSpanBase;
+ *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+ *phImport = psSyncBlock;
+ return PVRSRV_OK;
+
+fail_spanalloc:
+ _FreeSyncCheckpointBlock(psSyncBlock);
+fail_syncblockalloc:
+ _SyncCheckpointContextUnref(psContext);
+
+ return eError;
+}
+
+static void
+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport;
+
+ PVR_LOG_IF_FALSE((psContext != NULL), "hArena invalid");
+ PVR_LOG_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
+ PVR_LOG_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
+
+ /* Free the span this import is using */
+ RA_Free(psContext->psSpanRA, uiBase);
+
+ /* Free the sync checkpoint block */
+ _FreeSyncCheckpointBlock(psSyncBlock);
+
+ /* Drop our reference to the sync checkpoint context */
+ _SyncCheckpointContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt)
+{
+ IMG_UINT64 ui64Temp;
+
+ ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+ PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+ return (IMG_UINT32)ui64Temp;
+}
+
+/* Used by SyncCheckpointContextCreate() below */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+ PVR_ASSERT(IsPower2(ui32Align));
+ return ExactLog2(ui32Align);
+}
+
+/*
+ External interfaces
+*/
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve, PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ g_pfnFenceResolve = pfnFenceResolve;
+ g_pfnFenceCreate = pfnFenceCreate;
+
+ return eError;
+}
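+
+/*
+ Illustrative sketch only: an OS-native sync implementation is expected to
+ call SyncCheckpointRegisterFunctions() when it initialises so that the
+ resolve/create calls below can be routed to it. The callback names used
+ here are hypothetical.
+
+   static PVRSRV_ERROR OSNativeResolveFence(PVRSRV_FENCE_KM fence,
+                                            IMG_UINT32 *nr_checkpoints,
+                                            PSYNC_CHECKPOINT *checkpoint_handles);
+   static PVRSRV_ERROR OSNativeCreateFence(const IMG_CHAR *fence_name,
+                                           PVRSRV_TIMELINE_KM timeline,
+                                           PVRSRV_FENCE_KM *new_fence,
+                                           PSYNC_CHECKPOINT *new_checkpoint_handle);
+
+   SyncCheckpointRegisterFunctions(OSNativeResolveFence, OSNativeCreateFence);
+*/
+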
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointResolveFence(PVRSRV_FENCE_KM hFence, IMG_UINT32 *pui32NumSyncCheckpoints, PSYNC_CHECKPOINT *psSyncCheckpoints)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnFenceResolve)
+ {
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_DPF((PVR_DBG_ERROR, "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", __FUNCTION__));
+ PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
+ }
+ else
+ {
+ eError = g_pfnFenceResolve(hFence, pui32NumSyncCheckpoints, psSyncCheckpoints);
+ }
+ return eError;
+}
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointCreateFence(const IMG_CHAR *pszFenceName, PVRSRV_TIMELINE_KM hTimeline, PVRSRV_FENCE_KM *phNewFence, PSYNC_CHECKPOINT *psNewSyncCheckpoint)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnFenceCreate)
+ {
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_DPF((PVR_DBG_ERROR, "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", __FUNCTION__));
+ PVR_LOG_ERROR(eError, "g_pfnFenceCreate is NULL");
+ }
+ else
+ {
+ eError = g_pfnFenceCreate(pszFenceName, hTimeline, phNewFence, psNewSyncCheckpoint);
+ }
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_LOGR_IF_FALSE((ppsSyncCheckpointContext != NULL), "ppsSyncCheckpointContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ psContext = OSAllocZMem(sizeof(_SYNC_CHECKPOINT_CONTEXT));
+ if (psContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psContext->psDevNode = psDevNode;
+
+ OSSNPrintf(psContext->azName, SYNC_CHECKPOINT_NAME_SIZE, "Sync Prim RA-%p", psContext);
+ OSSNPrintf(psContext->azSpanName, SYNC_CHECKPOINT_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+ /*
+ Create the RA for sub-allocations of the sync checkpoints
+
+ Note:
+ The import size doesn't matter here as the server will pass
+ back the blocksize when it does the import which overrides
+ what we specify here.
+ */
+ psContext->psSubAllocRA = RA_Create(psContext->azName,
+ /* Params for imports */
+ _Log2(sizeof(IMG_UINT32)),
+ RA_LOCKCLASS_2,
+ _SyncCheckpointBlockImport,
+ _SyncCheckpointBlockUnimport,
+ psContext,
+ IMG_FALSE);
+ if (psContext->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_suballoc;
+ }
+
+ /*
+ Create the span-management RA
+
+ The RA requires that we work with linear spans. For our use
+ here we don't require this behaviour as we're always working
+ within offsets of blocks (imports). However, we need to keep
+ the RA happy so we create the "span" management RA which
+ ensures that all our imports are added to the RA in a linear
+ fashion
+ */
+ psContext->psSpanRA = RA_Create(psContext->azSpanName,
+ /* Params for imports */
+ 0,
+ RA_LOCKCLASS_1,
+ NULL,
+ NULL,
+ NULL,
+ IMG_FALSE);
+ if (psContext->psSpanRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span;
+ }
+
+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span_add;
+ }
+
+ OSAtomicWrite(&psContext->hRefCount, 1);
+ OSAtomicWrite(&psContext->hCheckpointCount, 0);
+
+ eError = OSLockCreate(&psContext->hCheckpointListLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_span_add;
+ }
+
+ dllist_init(&psContext->sCheckpointList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psContext->hCheckpointNotify,
+ psDevNode,
+ _SyncCheckpointDebugRequest,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ psContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_register_dbg_request;
+ }
+
+ eError = _SyncCheckpointRecordListInit(psContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_record_list_init;
+ }
+
+ *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
+ return PVRSRV_OK;
+
+fail_record_list_init:
+ PVRSRVUnregisterDbgRequestNotify(psContext->hCheckpointNotify);
+fail_register_dbg_request:
+ OSLockDestroy(psContext->hCheckpointListLock);
+fail_span_add:
+ RA_Delete(psContext->psSpanRA);
+fail_span:
+ RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+ OSFreeMem(psContext);
+fail_alloc:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext;
+ IMG_INT iRf = 0;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpointContext != NULL), "psSyncCheckpointContext invalid");
+
+ iRf = OSAtomicRead(&psContext->hCheckpointCount);
+ if (iRf != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s <%p> attempted with active references (iRf=%d), may be the result of a race", __FUNCTION__, (void*)psContext, iRf));
+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT;
+ }
+ else
+ {
+ _SyncCheckpointContextUnref(psContext);
+ }
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+ const IMG_CHAR *pszCheckpointName,
+ PSYNC_CHECKPOINT *ppsSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+ _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
+ PVRSRV_ERROR eError;
+
+ PVR_LOGR_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOGR_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+
+ if (psNewSyncCheckpoint == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_LOG_ERROR(eError, "OSAllocMem");
+ goto fail_alloc;
+ }
+
+ eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
+ sizeof(_SYNC_CHECKPOINT_FW_OBJ),
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ sizeof(IMG_UINT32),
+ (IMG_CHAR*)pszCheckpointName,
+ &psNewSyncCheckpoint->uiSpanAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_LOG_ERROR(eError, "RA_Alloc");
+ goto fail_raalloc;
+ }
+ psNewSyncCheckpoint->psSyncCheckpointFwObj = (volatile _SYNC_CHECKPOINT_FW_OBJ*)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+ (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+
+ /* the allocation of the backing memory for the sync prim block
+ * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+ * States are also reset to unsignalled on free, so no need to set here
+ */
+ OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
+ OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0);
+
+ if(pszCheckpointName)
+ {
+ /* Copy over the checkpoint name annotation */
+ OSStringNCopy(psNewSyncCheckpoint->azName, pszCheckpointName, SYNC_CHECKPOINT_NAME_SIZE);
+ psNewSyncCheckpoint->azName[SYNC_CHECKPOINT_NAME_SIZE-1] = 0;
+ }
+ else
+ {
+ /* No sync checkpoint name annotation */
+ psNewSyncCheckpoint->azName[0] = '\0';
+ }
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ {
+ IMG_CHAR szChkptName[SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN];
+
+ if(pszCheckpointName)
+ {
+ /* Copy the checkpoint name annotation into a fixed-size array */
+ OSStringNCopy(szChkptName, pszCheckpointName, SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN - 1);
+ szChkptName[SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN - 1] = 0;
+ }
+ else
+ {
+ /* No checkpoint name annotation */
+ szChkptName[0] = 0;
+ }
+ /* record this sync */
+ eError = SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord,
+ psNewSyncCheckpoint->psSyncCheckpointBlock,
+ psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr,
+ _SyncCheckpointGetOffset(psNewSyncCheckpoint),
+ OSStringNLength(szChkptName, SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN),
+ szChkptName);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "SyncCheckpointRecordAdd");
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszCheckpointName);
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+ OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+
+ /* Assign unique ID to this sync checkpoint */
+ psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++;
+
+ /* Add the sync checkpoint to the context list */
+ OSLockAcquire(psSyncContextInt->hCheckpointListLock);
+ dllist_add_to_head(&psSyncContextInt->sCheckpointList,
+ &psNewSyncCheckpoint->sListNode);
+ OSLockRelease(psSyncContextInt->hCheckpointListLock);
+
+ *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
+
+ return PVRSRV_OK;
+
+fail_raalloc:
+ OSFreeMem(psNewSyncCheckpoint);
+fail_alloc:
+
+ return eError;
+}
+
+IMG_INTERNAL void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ _SYNC_CHECKPOINT_CONTEXT *psContext;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+ if (psSyncCheckpointInt == NULL)
+ {
+ return;
+ }
+ psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+
+ if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount))
+ {
+ /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+ {
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ {
+ PVRSRV_ERROR eError;
+ /* remove this sync record */
+ eError = SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+ PVR_LOG_IF_ERROR(eError, "SyncCheckpointRecordRemove");
+ }
+#endif
+ /* Remove the sync checkpoint from the global list */
+ OSLockAcquire(psContext->hCheckpointListLock);
+ dllist_remove_node(&psSyncCheckpointInt->sListNode);
+ OSLockRelease(psContext->hCheckpointListLock);
+
+ OSAtomicDecrement(&psSyncCheckpointInt->psSyncCheckpointBlock->psContext->hCheckpointCount);
+ RA_Free(psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, psSyncCheckpointInt->uiSpanAddr);
+ }
+ }
+}
+
+IMG_INTERNAL void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if(psSyncCheckpointInt)
+ {
+ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED), "psSyncCheckpoint already signalled");
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Signalled Sync Checkpoint (FirmwareVAddr = 0x%08x)",
+ (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+ SyncCheckpointSignalPDump(psSyncCheckpointInt);
+#endif
+ }
+ }
+}
+
+IMG_INTERNAL void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if(psSyncCheckpointInt)
+ {
+ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED), "psSyncCheckpoint already signalled");
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Errored Sync Checkpoint (FirmwareVAddr = 0x%08x)",
+ (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+ SyncCheckpointErrorPDump(psSyncCheckpointInt);
+#endif
+ }
+ }
+}
+
+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+ (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED));
+ }
+ return bRet;
+}
+
+IMG_INTERNAL IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED);
+ }
+ return bRet;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ PVRSRV_ERROR eRet = PVRSRV_OK;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSAtomicIncrement(&psSyncCheckpointInt->hRefCount);
+
+ return eRet;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ PVRSRV_ERROR eRet = PVRSRV_OK;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSAtomicDecrement(&psSyncCheckpointInt->hRefCount);
+
+ return eRet;
+}
+
+IMG_INTERNAL void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+ }
+}
+
+IMG_INTERNAL IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlock;
+
+ PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+ psSyncBlock = psSyncCheckpointInt->psSyncCheckpointBlock;
+ /* add 1 to addr to indicate this FW addr is a sync checkpoint (not a sync prim) */
+ return psSyncBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpointInt) + 1;
+
+invalid_chkpt:
+ return 0;
+}
+
+void SyncCheckpointErrorFromUFO(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+ IMG_UINT32 ui32FwAddr)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT *)psSyncContext;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt;
+ IMG_BOOL bFound = IMG_FALSE;
+ PDLLIST_NODE psNode;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry (ui32FWAddr=%d) >",__FUNCTION__, ui32FwAddr));
+
+ OSLockAcquire(psContext->hCheckpointListLock);
+ psNode = dllist_get_next_node(&psContext->sCheckpointList);
+ while ((psNode != NULL) && !bFound)
+ {
+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+ {
+ bFound = IMG_TRUE;
+ /* Mark as errored */
+ SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+ }
+ psNode = dllist_get_next_node(psNode);
+ }
+ OSLockRelease(psContext->hCheckpointListLock);
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit <",__FUNCTION__));
+}
+
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+
+ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ PVR_DUMPDEBUG_LOG("\tPending sync checkpoint(ID = %d, FWAddr = 0x%08x): (%s)",
+ psSyncCheckpoint->ui32UID,
+ psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpoint),
+ psSyncCheckpoint->azName);
+ }
+}
+
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT *)hDebugRequestHandle;
+ DLLIST_NODE *psNode, *psNext;
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+ PVR_DUMPDEBUG_LOG("Dumping all pending sync checkpoints");
+ OSLockAcquire(psContext->hCheckpointListLock);
+ dllist_foreach_node(&psContext->sCheckpointList, psNode, psNext)
+ {
+ _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSLockRelease(psContext->hCheckpointListLock);
+ }
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR
+SyncCheckpointRecordAdd(
+ PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+ SYNC_CHECKPOINT_BLOCK * hSyncCheckpointBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
+ struct SYNC_CHECKPOINT_RECORD * psSyncRec;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!phRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *phRecord = NULL;
+
+ psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+ if (!psSyncRec)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
+ psSyncRec->ui32SyncOffset = ui32SyncOffset;
+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+ psSyncRec->ui64OSTime = OSClockns64();
+ psSyncRec->uiPID = OSGetCurrentProcessID();
+
+ if(pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+ /* Copy over the class name annotation */
+ OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+ psSyncRec->szClassName[ui32ClassNameSize] = 0;
+ }
+ else
+ {
+ /* No class name annotation */
+ psSyncRec->szClassName[0] = 0;
+ }
+
+ OSLockAcquire(psContext->hCheckpointRecordLock);
+ dllist_add_to_head(&psContext->sCheckpointRecordList, &psSyncRec->sNode);
+ OSLockRelease(psContext->hCheckpointRecordLock);
+
+ *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+ return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+ struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
+ struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+ _SYNC_CHECKPOINT_CONTEXT *psContext;
+
+ if (!hRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psContext = pSync->psSyncCheckpointBlock->psContext;
+
+ OSLockAcquire(psContext->hCheckpointRecordLock);
+
+ dllist_remove_node(&pSync->sNode);
+
+ if (psContext->uiCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range", __FUNCTION__));
+ psContext->uiCheckpointRecordFreeIdx = 0;
+ }
+ ppFreedSync = &psContext->apsCheckpointRecordsFreed[psContext->uiCheckpointRecordFreeIdx];
+ psContext->uiCheckpointRecordFreeIdx =
+ (psContext->uiCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+ if (*ppFreedSync)
+ {
+ OSFreeMem(*ppFreedSync);
+ }
+ pSync->psSyncCheckpointBlock = NULL;
+ pSync->ui64OSTime = OSClockns64();
+ *ppFreedSync = pSync;
+
+ OSLockRelease(psContext->hCheckpointRecordLock);
+
+ return PVRSRV_OK;
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec,
+ IMG_UINT64 ui64TimeNow,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+ IMG_UINT64 ui64DeltaS;
+ IMG_UINT32 ui32DeltaF;
+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime;
+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+ {
+ void *pSyncCheckpointAddr;
+ pSyncCheckpointAddr = (void*)(psSyncCheckpointBlock->pui32LinAddr + psSyncCheckpointRec->ui32SyncOffset);
+
+ PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u FWAddr=0x%08x State=%s (%s)",
+ psSyncCheckpointRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? "SIGNALLED" : ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? "ERRORED" : "NOT_SIGNALLED"),
+ psSyncCheckpointRec->szClassName
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u FWAddr=0x%08x State=<null_ptr> (%s)",
+ psSyncCheckpointRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+ psSyncCheckpointRec->szClassName
+ );
+ }
+}
+
+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT *)hDebugRequestHandle;
+ IMG_UINT64 ui64TimeNowS;
+ IMG_UINT32 ui32TimeNowF;
+ IMG_UINT64 ui64TimeNow = OSClockns64();
+ DLLIST_NODE *psNode, *psNext;
+
+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+ IMG_UINT32 i;
+
+ OSLockAcquire(psContext->hCheckpointRecordLock);
+
+ PVR_DUMPDEBUG_LOG("Dumping all allocated sync checkpoints @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-17s %-14s (%s)",
+ "PID", "Time Delta (s)", "Address", "State", "Annotation");
+
+ dllist_foreach_node(&psContext->sCheckpointRecordList, psNode, psNext)
+ {
+ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+ _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-17s %-14s (%s)",
+ "PID", "Time Delta (s)", "Address", "State", "Annotation");
+ for (i = DECREMENT_WITH_WRAP(psContext->uiCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+ i != psContext->uiCheckpointRecordFreeIdx;
+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+ {
+ if (psContext->apsCheckpointRecordsFreed[i])
+ {
+ _SyncCheckpointRecordPrint(psContext->apsCheckpointRecordsFreed[i],
+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ else
+ {
+ break;
+ }
+ }
+ OSLockRelease(psContext->hCheckpointRecordLock);
+ }
+}
+#undef NS_IN_S
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&psContext->hCheckpointRecordLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psContext->sCheckpointRecordList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psContext->hCheckpointRecordNotify,
+ psContext->psDevNode,
+ _SyncCheckpointRecordRequest,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ psContext);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+ return PVRSRV_OK;
+
+fail_dbg_register:
+ OSLockDestroy(psContext->hCheckpointRecordLock);
+fail_lock_create:
+ return eError;
+}
+
+static void _SyncCheckpointRecordListDeinit(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ int i;
+ DLLIST_NODE *psNode, *psNext;
+
+ OSLockAcquire(psContext->hCheckpointRecordLock);
+ dllist_foreach_node(&psContext->sCheckpointRecordList, psNode, psNext)
+ {
+ struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+
+ dllist_remove_node(psNode);
+ OSFreeMem(pSyncCheckpointRec);
+ }
+
+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+ {
+ if (psContext->apsCheckpointRecordsFreed[i])
+ {
+ OSFreeMem(psContext->apsCheckpointRecordsFreed[i]);
+ psContext->apsCheckpointRecordsFreed[i] = NULL;
+ }
+ }
+ OSLockRelease(psContext->hCheckpointRecordLock);
+
+ PVRSRVUnregisterDbgRequestNotify(psContext->hCheckpointRecordNotify);
+ psContext->hCheckpointRecordNotify = NULL;
+
+ OSLockDestroy(psContext->hCheckpointRecordLock);
+ psContext->hCheckpointRecordLock = NULL;
+}
+#else /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+PVRSRV_ERROR
+SyncCheckpointRecordAdd(
+ PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+ SYNC_CHECKPOINT_BLOCK * hSyncCheckpointBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ PVR_UNREFERENCED_PARAMETER(phRecord);
+ PVR_UNREFERENCED_PARAMETER(hSyncCheckpointBlock);
+ PVR_UNREFERENCED_PARAMETER(ui32FwBlockAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32SyncOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+ PVR_UNREFERENCED_PARAMETER(pszClassName);
+ return PVRSRV_OK;
+}
+PVRSRV_ERROR
+SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+ PVR_UNREFERENCED_PARAMETER(hRecord);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ PVR_UNREFERENCED_PARAMETER(psContext);
+ return PVRSRV_OK;
+}
+static void _SyncCheckpointRecordListDeinit(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ PVR_UNREFERENCED_PARAMETER(psContext);
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+#if defined(PDUMP)
+PVRSRV_ERROR
+SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ /*
+ We might be asked to PDump sync state outside of capture range
+ (e.g. texture uploads) so make this continuous.
+ */
+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+ _SyncCheckpointGetOffset(psSyncCheckpoint),
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ /*
+ We might be asked to PDump sync state outside of capture range
+ (e.g. texture uploads) so make this continuous.
+ */
+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+ _SyncCheckpointGetOffset(psSyncCheckpoint),
+ PVRSRV_SYNC_CHECKPOINT_ERRORED,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the client side interface for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_
+#define _SYNC_CHECKPOINT_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_NODE;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointContextCreate
+
+@Description Create a new synchronisation checkpoint context
+
+@Input psDevNode Device node
+
+@Output ppsSyncCheckpointContext Handle to the created synchronisation
+ checkpoint context
+
+@Return PVRSRV_OK if the synchronisation checkpoint context was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointContextDestroy
+
+@Description Destroy a synchronisation checkpoint context
+
+@Input psSyncCheckpointContext Handle to the synchronisation
+ checkpoint context to destroy
+
+@Return PVRSRV_OK if the synchronisation checkpoint context was
+ successfully destroyed.
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still
+ has sync checkpoints defined
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointAlloc
+
+@Description Allocate a new synchronisation checkpoint on the specified
+ synchronisation checkpoint context
+
+@Input psSyncContext Handle to the synchronisation
+ checkpoint context
+
+@Input pszCheckpointName Sync checkpoint source annotation
+ (will be truncated to at most
+ SYNC_CHECKPOINT_NAME_SIZE chars)
+
+@Output ppsSyncCheckpoint Created synchronisation checkpoint
+
+@Return PVRSRV_OK if the synchronisation checkpoint was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+ const IMG_CHAR *pszCheckpointName,
+ PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointFree
+
+@Description Free a synchronisation checkpoint.
+ The reference count held for the synchronisation checkpoint
+ is decremented; if it reaches zero, the checkpoint is also freed.
+
+@Input psSyncCheckpoint The synchronisation checkpoint to free
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointSignal
+
+@Description Signal the synchronisation checkpoint
+
+@Input psSyncCheckpoint The synchronisation checkpoint to signal
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint);
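+
+/* Illustrative usage sketch only (not part of the driver interface):
+ * error handling is omitted and psDevNode is assumed to be an existing
+ * device node. A caller would typically create a context, allocate a
+ * checkpoint from it, signal it, then free it before destroying the
+ * context:
+ *
+ *     PSYNC_CHECKPOINT_CONTEXT psCtx;
+ *     PSYNC_CHECKPOINT psChkpt;
+ *
+ *     SyncCheckpointContextCreate(psDevNode, &psCtx);
+ *     SyncCheckpointAlloc(psCtx, "MyCheckpoint", &psChkpt);
+ *     SyncCheckpointSignal(psChkpt);
+ *     SyncCheckpointFree(psChkpt);
+ *     SyncCheckpointContextDestroy(psCtx);
+ */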
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointError
+
+@Description Error the synchronisation checkpoint
+
+@Input psSyncCheckpoint The synchronisation checkpoint to error
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointErrorFromUFO
+
+@Description Error the synchronisation checkpoint which has the
+ given UFO firmware address
+
+@Input psSyncCheckpointContext Handle to the synchronisation checkpoint
+ context to which the checkpoint belongs
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint to be errored
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointErrorFromUFO(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointIsSignalled
+
+@Description Returns IMG_TRUE if the synchronisation checkpoint is
+ signalled or errored
+
+@Input psSyncCheckpoint The synchronisation checkpoint to test
+
+@Return IMG_TRUE if the synchronisation checkpoint is signalled
+ or errored, otherwise IMG_FALSE
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointIsErrored
+
+@Description Returns IMG_TRUE if the synchronisation checkpoint is
+ errored
+
+@Input psSyncCheckpoint The synchronisation checkpoint to test
+
+@Return IMG_TRUE if the synchronisation checkpoint is errored,
+ otherwise IMG_FALSE
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointTakeRef
+
+@Description Take a reference on a synchronisation checkpoint
+
+@Input psSyncCheckpoint Synchronisation checkpoint to take a
+ reference on
+
+@Return PVRSRV_OK if a reference was taken on the synchronisation
+ checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointDropRef
+
+@Description Drop a reference on a synchronisation checkpoint
+
+@Input psSyncCheckpoint Synchronisation checkpoint to drop a
+ reference on
+
+@Return PVRSRV_OK if a reference was dropped on the synchronisation
+ checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointResolveFence
+
+@Description Resolve a fence, returning a list of the sync checkpoints
+ that fence contains.
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input hFence The fence to be resolved
+
+@Output pui32NumSyncCheckpoints The number of sync checkpoints the
+ fence contains. Can return 0 if
+ passed a null (-1) fence.
+
+@Output psSyncCheckpoints List of sync checkpoints the fence
+ contains
+
+@Return PVRSRV_OK if a valid fence was provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PVRSRV_FENCE_KM hFence, IMG_UINT32 *pui32NumSyncCheckpoints, PSYNC_CHECKPOINT *psSyncCheckpoints);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointCreateFence
+
+@Description Create a fence containing a single sync checkpoint.
+ Return the fence and a pointer to the sync checkpoint it contains.
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input pszFenceName String to assign to the new fence
+ (for debugging purposes)
+
+@Input hTimeline Timeline on which the new fence is
+ to be created
+
+@Output phNewFence The newly created fence
+
+@Output psNewSyncCheckpoint The sync checkpoint contained in
+ the new fence
+
+@Return PVRSRV_OK if the fence was successfully created.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointCreateFence(const IMG_CHAR *pszFenceName, PVRSRV_TIMELINE_KM hTimeline, PVRSRV_FENCE_KM *phNewFence, PSYNC_CHECKPOINT *psNewSyncCheckpoint);
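+
+/* Illustrative usage sketch only: hTimeline is assumed to be an existing
+ * timeline handle and error handling is omitted. The fence created here
+ * could later be passed to SyncCheckpointResolveFence() to recover the
+ * sync checkpoints it contains:
+ *
+ *     PVRSRV_FENCE_KM hNewFence;
+ *     PSYNC_CHECKPOINT psNewChkpt;
+ *
+ *     SyncCheckpointCreateFence("MyFence", hTimeline, &hNewFence, &psNewChkpt);
+ */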
+
+#endif /* _SYNC_CHECKPOINT_ */
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines synchronisation checkpoint structures that are visible
+ internally and externally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_EXTERNAL_
+#define _SYNC_CHECKPOINT_EXTERNAL_
+
+#define SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN 32
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+
+/* PVRSRV_SYNC_CHECKPOINT states.
+ * The OS native sync implementation should call pfnIsSignalled() (which
+ * returns an IMG_BOOL) to determine whether a PVRSRV_SYNC_CHECKPOINT has
+ * signalled. It may, however, set the state of a PVRSRV_SYNC_CHECKPOINT
+ * that is currently NOT_SIGNALLED when that checkpoint represents a
+ * foreign sync.
+ */
+typedef enum
+{
+ PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED = 0x0, /*!< checkpoint has not signalled */
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED = 0x1, /*!< checkpoint has signalled */
+ PVRSRV_SYNC_CHECKPOINT_ERRORED = 0x3 /*!< checkpoint has been errored */
+} PVRSRV_SYNC_CHECKPOINT_STATE;
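+
+/* Illustrative sketch only: a foreign-sync handler might move a checkpoint
+ * out of the NOT_SIGNALLED state once the foreign fence completes, e.g.
+ *
+ *     if (ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ *     {
+ *         ui32State = bErrored ? PVRSRV_SYNC_CHECKPOINT_ERRORED
+ *                              : PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ *     }
+ *
+ * ui32State and bErrored are placeholder names; the actual transition is
+ * performed through the driver's checkpoint functions.
+ */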
+
+#if defined(PVR_USE_SYNC_CHECKPOINTS)
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+#endif
+#endif /* _SYNC_CHECKPOINT_EXTERNAL_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal server interface for services
+ synchronisation checkpoints.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SYNC_CHECKPOINT__
+#define __SYNC_CHECKPOINT__
+
+#include "img_types.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_NODE;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_CHECKPOINT_RECORD;
+#endif
+
+/*
+ Private structures
+*/
+#define SYNC_CHECKPOINT_NAME_SIZE SYNC_CHECKPOINT_MAX_CLASS_NAME_LEN
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_CHAR azName[SYNC_CHECKPOINT_NAME_SIZE]; /*!< Name of the RA */
+ RA_ARENA *psSubAllocRA; /*!< RA context */
+ IMG_CHAR azSpanName[SYNC_CHECKPOINT_NAME_SIZE]; /*!< Name of the span RA */
+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
+ ATOMIC_T hRefCount; /*!< Ref count for this context */
+ ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */
+ POS_LOCK hCheckpointListLock; /*!< Checkpoint list lock */
+ DLLIST_NODE sCheckpointList; /*!< List of checkpoints created on this context */
+ IMG_HANDLE hCheckpointNotify; /*!< Handle for debug notifier callback */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ POS_LOCK hCheckpointRecordLock;
+ DLLIST_NODE sCheckpointRecordList;
+ struct SYNC_CHECKPOINT_RECORD *apsCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+ IMG_UINT32 uiCheckpointRecordFreeIdx;
+ IMG_HANDLE hCheckpointRecordNotify;
+#endif
+} _SYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_
+{
+ ATOMIC_T hRefCount; /*!< Ref count for this sync block */
+ _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Sync checkpoint context this block was allocated on */
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
+ DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */
+ volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */
+ IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */
+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt block list */
+} SYNC_CHECKPOINT_BLOCK;
+
+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE;
+
+typedef struct _SYNC_CHECKPOINT_
+{
+ /* A sync checkpoint is assigned a unique ID, to avoid any confusion should
+ * the same memory be re-used later for a different checkpoint
+ */
+ IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/
+ ATOMIC_T hRefCount; /*!< Ref count for this sync */
+ ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */
+ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
+ volatile _SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
+ IMG_CHAR azName[SYNC_CHECKPOINT_NAME_SIZE]; /*!< Name of the checkpoint */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */
+#endif
+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt list */
+} _SYNC_CHECKPOINT;
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetFirmwareAddr
+
+@Description Returns the firmware address of the synchronisation
+ checkpoint
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the firmware address of
+
+@Return The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointCCBEnqueued
+
+@Description Increment the CCB enqueued reference count for a
+ synchronisation checkpoint. This indicates how many FW
+ operations (checks/updates) have been placed into CCBs for
+ the sync checkpoint.
+ When the FW services these operations, it increments its own
+ reference count. When the two values are equal, we know there
+ are no outstanding FW operations for the checkpoint in any
+ CCB.
+
+@Input psSyncCheckpoint Synchronisation checkpoint for which
+ to increment the enqueued reference
+ count
+
+@Return None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
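+
+/* Illustrative sketch only (placeholder names): the free path compares the
+ * FW's count of serviced operations with the enqueued count to decide
+ * whether the checkpoint memory can be released, along the lines of:
+ *
+ *     if (psFwObj->ui32FwRefCount ==
+ *         (IMG_UINT32)OSAtomicRead(&psCheckpoint->hEnqueuedCCBCount))
+ *     {
+ *         ... release the checkpoint's backing memory ...
+ *     }
+ */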
+
+#endif /* __SYNC_CHECKPOINT__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation checkpoint FW obj header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal FW object structure for services
+ synchronisation checkpoints.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INTERNAL_FW_
+#define _SYNC_CHECKPOINT_INTERNAL_FW_
+
+#include "img_types.h"
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct _SYNC_CHECKPOINT_FW_OBJ_
+{
+ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */
+ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */
+} _SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
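+
+/* Since PVRSRV_SYNC_CHECKPOINT_SIGNALLED (0x1) and
+ * PVRSRV_SYNC_CHECKPOINT_ERRORED (0x3) both have bit 0 set, a single test
+ * such as
+ *
+ *     (psFwObj->ui32State & SYNC_CHECKPOINT_SIGNALLED_MASK) != 0
+ *
+ * is true once the checkpoint has signalled or errored, and false while it
+ * is still NOT_SIGNALLED (0x0). (Illustrative sketch; psFwObj is a
+ * placeholder pointer to a _SYNC_CHECKPOINT_FW_OBJ.)
+ */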
+
+#endif /* _SYNC_CHECKPOINT_INTERNAL_FW_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal client side interface for services
+ synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_INTERNAL_
+#define _SYNC_INTERNAL_
+
+#include "img_types.h"
+#include <powervr/sync_external.h>
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+
+/*
+ Private structures
+*/
+#define SYNC_PRIM_NAME_SIZE 50
+typedef struct SYNC_PRIM_CONTEXT
+{
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */
+ RA_ARENA *psSubAllocRA; /*!< RA context */
+ IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
+ ATOMIC_T hRefCount; /*!< Ref count for this context */
+} SYNC_PRIM_CONTEXT;
+
+typedef struct _SYNC_PRIM_BLOCK_
+{
+ SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */
+ IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */
+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
+ DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */
+ IMG_UINT32 *pui32LinAddr; /*!< User CPU mapping */
+ IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */
+ DLLIST_NODE sListNode; /*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum _SYNC_PRIM_TYPE_
+{
+ SYNC_PRIM_TYPE_UNKNOWN = 0,
+ SYNC_PRIM_TYPE_LOCAL,
+ SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct _SYNC_PRIM_LOCAL_
+{
+ ATOMIC_T hRefCount; /*!< Ref count for this sync */
+ SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */
+ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ IMG_HANDLE hRecord; /*!< Sync record handle */
+#endif
+} SYNC_PRIM_LOCAL;
+
+typedef struct _SYNC_PRIM_SERVER_
+{
+ SYNC_BRIDGE_HANDLE hBridge; /*!< Bridge handle */
+ IMG_HANDLE hServerSync; /*!< Handle to the server sync */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address of the sync */
+} SYNC_PRIM_SERVER;
+
+typedef struct _SYNC_PRIM_
+{
+ PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */
+ SYNC_PRIM_TYPE eType; /*!< Sync primitive type */
+ union {
+ SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */
+ SYNC_PRIM_SERVER sServer; /*!< Server sync primitive data */
+ } u;
+} SYNC_PRIM;
+
+
+/* FIXME this must return a correctly typed pointer */
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_HANDLE *phBlock,
+ IMG_UINT32 *pui32Offset);
+
+
+#endif /* _SYNC_INTERNAL_ */
--- /dev/null
+/*************************************************************************/ /*!
+@File sync_server.c
+@Title Server side synchronisation functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side functions used for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "sync_server.h"
+#include "sync_server_internal.h"
+#include "allocmem.h"
+#include "device.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "htbuffer.h"
+#include "rgxhwperf.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#include "rgxdebug.h"
+#endif
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ DEVMEM_MEMDESC *psMemDesc;
+ IMG_UINT32 *pui32LinAddr;
+ IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */
+ IMG_UINT32 ui32RefCount;
+ POS_LOCK hLock;
+ DLLIST_NODE sConnectionNode;
+ SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */
+ PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */
+};
+
+struct _SERVER_SYNC_PRIMITIVE_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ IMG_UINT32 ui32NextOp;
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32UID;
+ IMG_UINT32 ui32LastSyncRequesterID;
+ DLLIST_NODE sNode;
+ /* PDump only data */
+ IMG_BOOL bSWOperation;
+ IMG_BOOL bSWOpStartedInCaptRange;
+ IMG_UINT32 ui32LastHWUpdate;
+ IMG_BOOL bPDumped;
+ POS_LOCK hLock;
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+struct _SERVER_SYNC_EXPORT_
+{
+ SERVER_SYNC_PRIMITIVE *psSync;
+};
+
+struct _SERVER_OP_COOKIE_
+{
+ IMG_BOOL bActive;
+ /*
+ Client syncblock(s) info.
+ If this changes update the calculation of ui32BlockAllocSize
+ */
+ IMG_UINT32 ui32SyncBlockCount;
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock;
+
+ /*
+ Client sync(s) info.
+ If this changes update the calculation of ui32ClientAllocSize
+ */
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 *paui32SyncBlockIndex;
+ IMG_UINT32 *paui32Index;
+ IMG_UINT32 *paui32Flags;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 *paui32UpdateValue;
+
+ /*
+ Server sync(s) info
+ If this changes update the calculation of ui32ServerAllocSize
+ */
+ IMG_UINT32 ui32ServerSyncCount;
+ SERVER_SYNC_PRIMITIVE **papsServerSync;
+ IMG_UINT32 *paui32ServerFenceValue;
+ IMG_UINT32 *paui32ServerUpdateValue;
+
+};
+
+struct _SYNC_CONNECTION_DATA_
+{
+ DLLIST_NODE sListHead;
+ IMG_UINT32 ui32RefCount;
+ POS_LOCK hLock;
+};
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
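+/* For example, DECREMENT_WITH_WRAP(3, 8) evaluates to 2 and
+ * DECREMENT_WITH_WRAP(0, 8) wraps to 7, which is what allows the freed
+ * record history to be walked backwards from the most recent entry.
+ */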
+
+enum SYNC_RECORD_TYPE
+{
+ SYNC_RECORD_TYPE_UNKNOWN = 0,
+ SYNC_RECORD_TYPE_CLIENT,
+ SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */
+ IMG_UINT32 ui32FwBlockAddr;
+ IMG_PID uiPID;
+ IMG_UINT64 ui64OSTime;
+ enum SYNC_RECORD_TYPE eRecordType;
+ DLLIST_NODE sNode;
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_UINT32 g_ServerSyncUID = 0;
+
+#define SYNC_REQUESTOR_UNKNOWN 0
+static IMG_UINT32 g_ui32NextSyncRequestorID = 1;
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+/*!
+*****************************************************************************
+ @Function : SyncPrimitiveBlockToFWAddr
+
+ @Description : Given a pointer to a sync primitive block and an offset,
+ returns the firmware address of the sync.
+
+ @Input psSyncPrimBlock : Sync primitive block which contains the sync
+ @Input ui32Offset : Offset of sync within the sync primitive block
+ @Output psAddrOut : Absolute FW address of the sync is written out through
+ this pointer
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 ui32Offset,
+ PRGXFWIF_UFO_ADDR *psAddrOut)
+{
+ /* check offset is legal */
+ if((ui32Offset >= psSyncPrimBlock->ui32BlockSize) ||
+ (ui32Offset % sizeof(IMG_UINT32)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimitiveBlockToFWAddr: parameters check failed"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset;
+ return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListGrow
+
+ @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given
+ number of syncs
+
+ @Input psList : The SYNC_ADDR_LIST to grow
+ @Input ui32NumSyncs : The number of sync addresses to be able to hold
+ @Return : PVRSRV_OK on success
+*****************************************************************************/
+
+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
+{
+ PVR_ASSERT(ui32NumSyncs <= PVRSRV_MAX_SYNC_PRIMS);
+
+ if(ui32NumSyncs > psList->ui32NumSyncs)
+ {
+ if(psList->pasFWAddrs == NULL)
+ {
+ psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_PRIMS);
+ if(psList->pasFWAddrs == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ psList->ui32NumSyncs = ui32NumSyncs;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListInit
+
+ @Description : Initialise a SYNC_ADDR_LIST structure ready for use
+
+ @Input psList : The SYNC_ADDR_LIST structure to initialise
+ @Return : None
+*****************************************************************************/
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList)
+{
+ psList->ui32NumSyncs = 0;
+ psList->pasFWAddrs = NULL;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListDeinit
+
+ @Description : Frees any resources associated with the given SYNC_ADDR_LIST
+
+ @Input psList : The SYNC_ADDR_LIST structure to deinitialise
+ @Return : None
+*****************************************************************************/
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
+{
+ if(psList->pasFWAddrs != NULL)
+ {
+ OSFreeMem(psList->pasFWAddrs);
+ }
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListPopulate
+
+ @Description : Populate the given SYNC_ADDR_LIST with the FW addresses
+ of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
+
+ @Input ui32NumSyncs : The number of syncs being passed in
+ @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
+ in which the syncs are based
+ @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks
+ where the syncs are located
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumSyncs,
+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+ IMG_UINT32 *paui32SyncOffset)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if(ui32NumSyncs > psList->ui32NumSyncs)
+ {
+ eError = SyncAddrListGrow(psList, ui32NumSyncs);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ for(i = 0; i < ui32NumSyncs; i++)
+ {
+ eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
+ paui32SyncOffset[i],
+ &psList->pasFWAddrs[i]);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
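+/*!
+ @Function : PVRSRVSyncRecordAddKM
+
+ @Description : Allocate a SYNC_RECORD for the given sync (block, offset and
+ FW block address), tag it with the caller's PID, a timestamp and
+ a truncated class name annotation, and add it to the device's
+ sync record list. A HWPerf host ALLOC event is also emitted.
+*/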
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ struct SYNC_RECORD * psSyncRec;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ RGX_HWPERF_HOST_ALLOC(SYNC,
+ ui32FwBlockAddr + ui32SyncOffset,
+ pszClassName,
+ ui32ClassNameSize);
+
+ if (!phRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *phRecord = NULL;
+
+ psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+ if (!psSyncRec)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psSyncRec->psDevNode = psDevNode;
+ psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+ psSyncRec->ui32SyncOffset = ui32SyncOffset;
+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+ psSyncRec->ui64OSTime = OSClockns64();
+ psSyncRec->uiPID = OSGetCurrentProcessID();
+ psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
+
+ if(pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+ /* Copy over the class name annotation */
+ OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+ psSyncRec->szClassName[ui32ClassNameSize] = 0;
+ }
+ else
+ {
+ /* No class name annotation */
+ psSyncRec->szClassName[0] = 0;
+ }
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ *phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord)
+{
+ struct SYNC_RECORD **ppFreedSync;
+ struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ if (!hRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevNode = pSync->psDevNode;
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+ RGX_HWPERF_HOST_FREE(SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset);
+
+ dllist_remove_node(&pSync->sNode);
+
+ if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range",
+ __func__));
+ psDevNode->uiSyncServerRecordFreeIdx = 0;
+ }
+ ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx];
+ psDevNode->uiSyncServerRecordFreeIdx =
+ (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+ if (*ppFreedSync)
+ {
+ OSFreeMem(*ppFreedSync);
+ }
+ pSync->psServerSyncPrimBlock = NULL;
+ pSync->ui64OSTime = OSClockns64();
+ *ppFreedSync = pSync;
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ return PVRSRV_OK;
+}
+#else
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ *phRecord = NULL;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(hServerSyncPrimBlock);
+ PVR_UNREFERENCED_PARAMETER(ui32FwBlockAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32SyncOffset);
+ PVR_UNREFERENCED_PARAMETER(bServerSync);
+ PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+ PVR_UNREFERENCED_PARAMETER(pszClassName);
+ return PVRSRV_OK;
+}
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord)
+{
+ PVR_UNREFERENCED_PARAMETER(hRecord);
+ return PVRSRV_OK;
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ PVR_UNREFERENCED_PARAMETER(bServerSync);
+
+ RGX_HWPERF_HOST_ALLOC(SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(IMG_UINT32 ui32FWAddr)
+{
+ RGX_HWPERF_HOST_FREE(SYNC, ui32FWAddr);
+
+ return PVRSRV_OK;
+}
+
+static
+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+ ui32RefCount = ++psSyncConnectionData->ui32RefCount;
+ OSLockRelease(psSyncConnectionData->hLock);
+
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, ui32RefCount);
+}
+
+static
+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+ ui32RefCount = --psSyncConnectionData->ui32RefCount;
+ OSLockRelease(psSyncConnectionData->hLock);
+
+ if (ui32RefCount == 0)
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, ui32RefCount);
+
+ PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+ OSLockDestroy(psSyncConnectionData->hLock);
+ OSFreeMem(psSyncConnectionData);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, ui32RefCount);
+ }
+}
+
+static
+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+ if (psConnection)
+ {
+ SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+ /*
+ Make sure the connection doesn't go away. It doesn't matter that we
+ release the lock in between, as the refcount and the list don't have
+ to be updated atomically with respect to each other.
+ */
+ _SyncConnectionRef(psSyncConnectionData);
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+ dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+ OSLockRelease(psSyncConnectionData->hLock);
+ psBlock->psSyncConnectionData = psSyncConnectionData;
+ }
+ else
+ {
+ psBlock->psSyncConnectionData = NULL;
+ }
+}
+
+static
+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+ SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+ if (psBlock->psSyncConnectionData)
+ {
+ OSLockAcquire(psSyncConnectionData->hLock);
+ dllist_remove_node(&psBlock->sConnectionNode);
+ OSLockRelease(psSyncConnectionData->hLock);
+
+ _SyncConnectionUnref(psBlock->psSyncConnectionData);
+ }
+}
+
+static
+void _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSyncBlk->hLock);
+ ui32RefCount = ++psSyncBlk->ui32RefCount;
+ OSLockRelease(psSyncBlk->hLock);
+
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+ __FUNCTION__, psSyncBlk, ui32RefCount);
+}
+
+static
+void _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSyncBlk->hLock);
+ ui32RefCount = --psSyncBlk->ui32RefCount;
+ OSLockRelease(psSyncBlk->hLock);
+
+ if (ui32RefCount == 0)
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+ __FUNCTION__, psSyncBlk, ui32RefCount);
+
+ _SyncConnectionRemoveBlock(psSyncBlk);
+ OSLockDestroy(psSyncBlk->hLock);
+ DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+ OSFreeMem(psSyncBlk);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+ __FUNCTION__, psSyncBlk, ui32RefCount);
+ }
+}
+
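+/*!
+ @Function : PVRSRVAllocSyncPrimitiveBlockKM
+
+ @Description : Allocate a sync primitive block: obtain a UFO block from the
+ device, map it for CPU access, expose its PMR to the caller and
+ link the block (refcount 1) to the connection's sync data, if
+ any. Returns the block's firmware base address and size.
+*/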
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize,
+ PMR **ppsSyncPMR)
+{
+ SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+ PVRSRV_ERROR eError;
+
+ psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+ if (psNewSyncBlk == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ psNewSyncBlk->psDevNode = psDevNode;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+ eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+ &psNewSyncBlk->psMemDesc,
+ &psNewSyncBlk->uiFWAddr.ui32Addr,
+ &psNewSyncBlk->ui32BlockSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+ (void **) &psNewSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+
+ eError = OSLockCreate(&psNewSyncBlk->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+
+ psNewSyncBlk->ui32RefCount = 1;
+
+ /* If there is a connection pointer then add the new block onto its list */
+ _SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+ *ppsSyncBlk = psNewSyncBlk;
+ *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+ *puiSyncPrimVAddr);
+
+ return PVRSRV_OK;
+
+e3:
+ DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+ psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+ OSFreeMem(psNewSyncBlk);
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ _SyncPrimitiveBlockUnref(psSyncBlk);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value)
+{
+ if((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize)
+ {
+ psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+ "0x%08X byte sync block (value 0x%08X)",
+ ui32Index,
+ psSyncBlk->ui32BlockSize,
+ ui32Value));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value)
+{
+ *psServerSync->psSync->pui32LinAddr = ui32Value;
+
+ return PVRSRV_OK;
+}
+
+static void
+_ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSync->hLock);
+ ui32RefCount = ++psSync->ui32RefCount;
+ OSLockRelease(psSync->hLock);
+
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, ui32RefCount);
+}
+
+static void
+_ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = psSync->psDevNode;
+ IMG_UINT32 ui32RefCount;
+
+ OSLockAcquire(psSync->hLock);
+ ui32RefCount = --psSync->ui32RefCount;
+ OSLockRelease(psSync->hLock);
+
+ if (ui32RefCount == 0)
+ {
+ IMG_UINT32 ui32SyncAddr;
+
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, ui32RefCount);
+ HTBLOGK(HTB_SF_SYNC_SERVER_UNREF, ui32SyncAddr);
+
+ /* Remove the sync from the global list */
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_remove_node(&psSync->sNode);
+ OSLockRelease(psDevNode->hSyncServerListLock);
+
+ OSLockDestroy(psSync->hLock);
+ /* safe to ignore return value as an error indicates
+ * the sync is either already freed or not a sync
+ */
+ (void)SyncPrimFree(psSync->psSync);
+ OSFreeMem(psSync);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, ui32RefCount);
+ }
+}
+
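+/*!
+ @Function : PVRSRVServerSyncAllocKM
+
+ @Description : Allocate a server sync primitive: copy the class name
+ annotation, allocate and zero the underlying client sync prim,
+ initialise the bookkeeping state (refcount 1, new UID, no
+ pending operation) and add the sync to the device's server
+ sync list. Returns the sync's firmware address.
+*/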
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ SERVER_SYNC_PRIMITIVE *psNewSync;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE));
+ if (psNewSync == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* szClassName must be set up now and used for the SyncPrimAlloc call because
+ * pszClassName, which is allocated in the bridge code, is not NULL terminated
+ */
+ if(pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+ /* Copy over the class name annotation */
+ OSStringNCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize);
+ psNewSync->szClassName[ui32ClassNameSize] = 0;
+ }
+ else
+ {
+ /* No class name annotation */
+ psNewSync->szClassName[0] = 0;
+ }
+
+ eError = SyncPrimAllocForServerSync(psDevNode->hSyncPrimContext,
+ &psNewSync->psSync,
+ psNewSync->szClassName);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_sync_alloc;
+ }
+
+ eError = OSLockCreate(&psNewSync->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+
+ eError = SyncPrimSet(psNewSync->psSync, 0);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_sync_op;
+ }
+
+ psNewSync->psDevNode = psDevNode;
+ psNewSync->ui32NextOp = 0;
+ psNewSync->ui32RefCount = 1;
+ psNewSync->ui32UID = g_ServerSyncUID++;
+ psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+ psNewSync->bSWOperation = IMG_FALSE;
+ psNewSync->ui32LastHWUpdate = 0x0bad592c;
+ psNewSync->bPDumped = IMG_FALSE;
+
+ eError = SyncPrimGetFirmwareAddr(psNewSync->psSync, pui32SyncPrimVAddr);
+ if (PVRSRV_OK != eError)
+ {
+ goto fail_sync_op;
+ }
+
+ /* Add the sync to the global list */
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_add_to_head(&psDevNode->sSyncServerSyncsList, &psNewSync->sNode);
+ OSLockRelease(psDevNode->hSyncServerListLock);
+
+ HTBLOGK(HTB_SF_SYNC_SERVER_ALLOC, *pui32SyncPrimVAddr);
+ SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __FUNCTION__, psNewSync, *pui32SyncPrimVAddr);
+ *ppsSync = psNewSync;
+ return PVRSRV_OK;
+
+fail_sync_op:
+ OSLockDestroy(psNewSync->hLock);
+
+fail_lock_create:
+ SyncPrimFree(psNewSync->psSync);
+
+fail_sync_alloc:
+ OSFreeMem(psNewSync);
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ _ServerSyncUnref(psSync);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+ SERVER_SYNC_PRIMITIVE **papsSyncs,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ IMG_UINT32 i, ui32SyncAddr;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eReturn = PVRSRV_OK;
+
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ PVRSRV_CLIENT_SYNC_PRIM *psClientSync = papsSyncs[i]->psSync;
+
+ eError = SyncPrimGetFirmwareAddr(psClientSync, &ui32SyncAddr);
+ if (PVRSRV_OK != eError)
+ {
+ pui32FWAddr[i] = 0;
+ pui32CurrentOp[i] = 0;
+ eReturn = eError;
+ }
+ else
+ {
+ pui32FWAddr[i] = ui32SyncAddr;
+ pui32CurrentOp[i] = *psClientSync->pui32LinAddr;
+ }
+ pui32NextOp[i] = papsSyncs[i]->ui32NextOp;
+ pui32UID[i] = papsSyncs[i]->ui32UID;
+ }
+ return eReturn;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT)
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport)
+{
+ SERVER_SYNC_EXPORT *psNewExport;
+ PVRSRV_ERROR eError;
+
+ psNewExport = OSAllocMem(sizeof(SERVER_SYNC_EXPORT));
+ if (!psNewExport)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ _ServerSyncRef(psSync);
+
+ psNewExport->psSync = psSync;
+ *ppsExport = psNewExport;
+
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ _ServerSyncUnref(psExport->psSync);
+
+ OSFreeMem(psExport);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerImportKM(PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ SERVER_SYNC_PRIMITIVE *psSync = psExport->psSync;
+ PVRSRV_ERROR eError;
+
+ if (psSync->psDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: server sync invalid for this device\n",
+ __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ _ServerSyncRef(psSync);
+
+ *ppsSync = psSync;
+ eError = SyncPrimGetFirmwareAddr(psSync->psSync,
+ pui32SyncPrimVAddr);
+ return eError;
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT) */
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport)
+{
+ return _PVRSRVSyncPrimServerExportKM(psSync, ppsExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ return _PVRSRVSyncPrimServerUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return _PVRSRVSyncPrimServerImportKM(psDevNode, psExport, ppsSync,
+ pui32SyncPrimVAddr);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_SECURE_TYPE *phSecure,
+ SERVER_SYNC_EXPORT **ppsExport,
+ CONNECTION_DATA **ppsSecureConnection)
+{
+ SERVER_SYNC_EXPORT *psNewExport;
+ PVRSRV_ERROR eError;
+
+ /* Create an export server sync */
+ eError = _PVRSRVSyncPrimServerExportKM(psSync,
+ &psNewExport);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Transform it into a secure export */
+ eError = OSSecureExport(psConnection,
+ (void *) psNewExport,
+ phSecure,
+ ppsSecureConnection);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ *ppsExport = psNewExport;
+ return PVRSRV_OK;
+e1:
+ _PVRSRVSyncPrimServerUnexportKM(psNewExport);
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/* FIXME: This is the same as the non-secure version. */
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ _PVRSRVSyncPrimServerUnexportKM(psExport);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_EXPORT *psImport;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* Retrieve the data from the secure import */
+ eError = OSSecureImport(hSecure, (void **) &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = _PVRSRVSyncPrimServerImportKM(psDevNode, psImport, ppsSync,
+ pui32SyncPrimVAddr);
+e0:
+ return eError;
+}
+#endif /* defined(SUPPORT_SECURE_EXPORT) */
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID)
+{
+ *pui32SyncRequesterID = g_ui32NextSyncRequestorID++;
+
+ return PVRSRV_OK;
+}
+
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32SyncRequesterID);
+}
+
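+/*
+ Reserve the next operation on a server sync: return the value the caller
+ must fence on and the value the operation will write on completion
+ (advancing ui32NextOp only when an update is requested). The first
+ operation inside a PDump capture range also dumps the initial sync state.
+*/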
+static void
+_ServerSyncTakeOperation(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ IMG_BOOL bInCaptureRange;
+
+ /* Only advance the pending if an update is required */
+ if (bUpdate)
+ {
+ *pui32FenceValue = psSync->ui32NextOp++;
+ }
+ else
+ {
+ *pui32FenceValue = psSync->ui32NextOp;
+ }
+
+ *pui32UpdateValue = psSync->ui32NextOp;
+
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+ /*
+ If this is the 1st operation (in this capture range) then PDump
+ this sync
+ */
+ if (!psSync->bPDumped && bInCaptureRange)
+ {
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32SyncAddr;
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ PDumpCommentWithFlags(0,
+ "Dump initial sync state (0x%p, FW VAddr = 0x%08x) = 0x%08x\n",
+ psSync,
+ ui32SyncAddr,
+ *psSync->psSync->pui32LinAddr);
+ }
+#endif
+
+ SyncPrimPDump(psSync->psSync);
+ psSync->bPDumped = IMG_TRUE;
+ }
+
+ /*
+ When exiting capture range clear down bPDumped as we might re-enter
+ capture range and thus need to PDump this sync again
+ */
+ if (!bInCaptureRange)
+ {
+ psSync->bPDumped = IMG_FALSE;
+ }
+}
+
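+/*!
+ @Function : PVRSRVServerSyncQueueSWOpKM
+
+ @Description : Take a reference on the server sync and queue a software
+ operation on it, returning the fence and update values. If
+ requested, also report whether the caller needs to emit a
+ fence (i.e. the previous operation came from a different
+ requester) and track the HW to SW transition for PDump.
+*/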
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired)
+{
+
+ _ServerSyncRef(psSync);
+
+ /*
+ _ServerSyncRef will acquire and release the lock but we need to
+ reacquire here to ensure the state that we're modifying below
+ will be consistent with itself. But it doesn't matter if another
+ thread acquires the lock in between as we've ensured the sync
+ won't go away
+ */
+ OSLockAcquire(psSync->hLock);
+ _ServerSyncTakeOperation(psSync,
+ bUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+ /*
+ The caller wants to know if a fence command is required,
+ i.e. whether the last operation on this sync was done by
+ the same sync requester (if so, no fence is needed)
+ */
+ if (pbFenceRequired)
+ {
+ if (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID)
+ {
+ *pbFenceRequired = IMG_FALSE;
+ }
+ else
+ {
+ *pbFenceRequired = IMG_TRUE;
+ }
+ }
+ /*
+ If we're transitioning from a HW operation to a SW operation we
+ need to save the last update the HW will do so that when we PDump
+ we can issue a POL for it before the next HW operation and then
+ LDB in the last SW fence update
+ */
+ if (psSync->bSWOperation == IMG_FALSE)
+ {
+ psSync->bSWOperation = IMG_TRUE;
+ psSync->ui32LastHWUpdate = *pui32FenceValue;
+ PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
+ }
+
+ if (pbFenceRequired)
+ {
+ if (*pbFenceRequired)
+ {
+ SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+ }
+ }
+
+ /* Only update the last requester ID if we are making changes to this sync
+ * object. */
+ if (bUpdate)
+ psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;
+
+ OSLockRelease(psSync->hLock);
+
+ return PVRSRV_OK;
+}
+
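+/*!
+ @Function : PVRSRVServerSyncQueueHWOpKM
+
+ @Description : Queue a hardware operation on the server sync and return the
+ fence and update values. No reference is taken, so the caller
+ must ensure the operation completes before the sync is freed.
+ If a SW operation preceded this one, the necessary PDump POL
+ and value are emitted to keep captures consistent.
+*/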
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ /*
+ For HW operations the client is required to ensure the
+ operation has completed before freeing the sync, as we have
+ no way of dropping the refcount if we were to acquire it
+ here.
+
+ Take the lock to ensure the state that we're modifying below
+ will be consistent with itself.
+ */
+ OSLockAcquire(psSync->hLock);
+ _ServerSyncTakeOperation(psSync,
+ bUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+ /*
+ Note:
+
+ We might want to consider optimising the fences that we write for
+ HW operations but for now just clear it back to unknown
+ */
+ psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+
+ if (psSync->bSWOperation)
+ {
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32SyncAddr;
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ PDumpCommentWithFlags(0,
+ "Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n",
+ psSync,
+ ui32SyncAddr,
+ *pui32FenceValue);
+ }
+#endif
+
+ if (psSync->bSWOpStartedInCaptRange)
+ {
+ /* Dump a POL for the previous HW operation */
+ SyncPrimPDumpPol(psSync->psSync,
+ psSync->ui32LastHWUpdate,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+ }
+
+ /* Dump the expected value (i.e. the value after all the SW operations) */
+ SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);
+
+ /* Reset the state as we've just done a HW operation */
+ psSync->bSWOperation = IMG_FALSE;
+ }
+ OSLockRelease(psSync->hLock);
+
+ SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+
+ return PVRSRV_OK;
+}
+
+IMG_BOOL ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 ui32FenceValue)
+{
+ SYNC_UPDATES_PRINT("%s: sync: %p, value(%d) == fence(%d)?", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32FenceValue);
+ return (*psSync->psSync->pui32LinAddr == ui32FenceValue);
+}
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bDoUpdate,
+ IMG_UINT32 ui32UpdateValue)
+{
+ if (bDoUpdate)
+ {
+ SYNC_UPDATES_PRINT("%s: sync: %p (%d) = %d", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32UpdateValue);
+
+ *psSync->psSync->pui32LinAddr = ui32UpdateValue;
+ }
+
+ _ServerSyncUnref(psSync);
+}
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return psSync->ui32UID;
+}
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr)
+{
+ return SyncPrimGetFirmwareAddr(psSync->psSync, pui32SyncAddr);
+}
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return *psSync->psSync->pui32LinAddr;
+}
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return psSync->ui32NextOp;
+}
+
+static void _ServerSyncState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ SERVER_SYNC_PRIMITIVE *psSync = IMG_CONTAINER_OF(psNode, SERVER_SYNC_PRIMITIVE, sNode);
+
+ if (*psSync->psSync->pui32LinAddr != psSync->ui32NextOp)
+ {
+ IMG_UINT32 ui32SyncAddr;
+
+ (void)ServerSyncGetFWAddr(psSync, &ui32SyncAddr);
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Current = 0x%08x, NextOp = 0x%08x (%s)",
+ psSync->ui32UID,
+ ui32SyncAddr,
+ ServerSyncGetValue(psSync),
+ psSync->ui32NextOp,
+ psSync->szClassName);
+#else
+ PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Value (Host) = 0x%08x, Value (FW) = 0x%08x, NextOp = 0x%08x (%s)",
+ psSync->ui32UID,
+ ui32SyncAddr,
+ ServerSyncGetValue(psSync),
+ RGXReadWithSP(ui32SyncAddr),
+ psSync->ui32NextOp,
+ psSync->szClassName);
+#endif
+ }
+}
+
+static void _ServerSyncDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ DLLIST_NODE *psNode, *psNext;
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+ PVR_DUMPDEBUG_LOG("------[ Pending Server Syncs ]------");
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_foreach_node(&psDevNode->sSyncServerSyncsList, psNode, psNext)
+ {
+ _ServerSyncState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSLockRelease(psDevNode->hSyncServerListLock);
+ }
+}
+
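+/*!
+ @Function : PVRSRVSyncPrimOpCreateKM
+
+ @Description : Create a server operation cookie describing an operation over
+ a set of client syncs (by block and index) and server syncs,
+ taking a reference on every sync block and server sync named.
+ The cookie is then driven through PVRSRVSyncPrimOpTakeKM,
+ PVRSRVSyncPrimOpReadyKM and PVRSRVSyncPrimOpCompleteKM, and
+ finally released with PVRSRVSyncPrimOpDestroyKM.
+*/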
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32SyncBlockIndex,
+ IMG_UINT32 *paui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ SERVER_SYNC_PRIMITIVE **papsServerSync,
+ SERVER_OP_COOKIE **ppsServerCookie)
+{
+ SERVER_OP_COOKIE *psNewCookie;
+ IMG_UINT32 ui32BlockAllocSize;
+ IMG_UINT32 ui32ServerAllocSize;
+ IMG_UINT32 ui32ClientAllocSize;
+ IMG_UINT32 ui32TotalAllocSize;
+ IMG_UINT32 i;
+ IMG_CHAR *pcPtr;
+ PVRSRV_ERROR eError;
+
+ /* Space needed for the sync block pointer list */
+ ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));
+
+ /* Space needed per client sync: block index, index, flags, fence and update values */
+ ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+
+ /* Space needed per server sync: primitive pointer plus fence and update values */
+ ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
+ + (2 * sizeof(IMG_UINT32)));
+
+ ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
+ ui32BlockAllocSize +
+ ui32ServerAllocSize +
+ ui32ClientAllocSize;
+
+ psNewCookie = OSAllocZMem(ui32TotalAllocSize);
+ if (!psNewCookie)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ pcPtr = (IMG_CHAR *) psNewCookie;
+
+ /* Setup the pointers */
+ pcPtr += sizeof(SERVER_OP_COOKIE);
+ psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;
+
+ pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
+ psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->papsServerSync = (SERVER_SYNC_PRIMITIVE **) pcPtr;
+
+ pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
+ psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+ psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+ /* Check the pointer setup went ok */
+ PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
+
+ psNewCookie->ui32SyncBlockCount = ui32SyncBlockCount;
+ psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+ psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+ psNewCookie->bActive = IMG_FALSE;
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_CREATE, psNewCookie, ui32SyncBlockCount,
+ ui32ServerSyncCount, ui32ClientSyncCount);
+
+ /* Copy all the data into our server cookie */
+ OSCachedMemCopy(psNewCookie->papsSyncPrimBlock,
+ papsSyncPrimBlock,
+ sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);
+
+ OSCachedMemCopy(psNewCookie->paui32SyncBlockIndex,
+ paui32SyncBlockIndex,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+ OSCachedMemCopy(psNewCookie->paui32Index,
+ paui32Index,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+ OSCachedMemCopy(psNewCookie->papsServerSync,
+ papsServerSync,
+ sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount);
+
+ /*
+ Take a reference on all the sync blocks and server syncs so they can't
+ be freed while we're using them
+ */
+ for (i=0;i<ui32SyncBlockCount;i++)
+ {
+ _SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
+ }
+
+ for (i=0;i<ui32ServerSyncCount;i++)
+ {
+ _ServerSyncRef(psNewCookie->papsServerSync[i]);
+ }
+
+ *ppsServerCookie = psNewCookie;
+ return PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
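+/*!
+ @Function : PVRSRVSyncPrimOpTakeKM
+
+ @Description : Take the operation described by the cookie: store the flags,
+ fence and update values for the client syncs, queue a SW
+ operation on every server sync (which must all have the CHECK
+ flag set) and mark the cookie active.
+*/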
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32Flags,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerFlags)
+{
+ IMG_UINT32 i;
+
+ if ((ui32ClientSyncCount != psServerCookie->ui32ClientSyncCount) ||
+ (ui32ServerSyncCount != psServerCookie->ui32ServerSyncCount))
+ {
+ /* The bridge layer should have stopped us getting here, but check just in case */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync counts", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ for (i=0;i<ui32ServerSyncCount;i++)
+ {
+ /* Server syncs must fence */
+ if ((paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+ {
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ /*
+ For client syncs all we need to do is save the values
+ that we've been passed
+ */
+ OSCachedMemCopy(psServerCookie->paui32Flags,
+ paui32Flags,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+ OSCachedMemCopy(psServerCookie->paui32FenceValue,
+ paui32FenceValue,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+ OSCachedMemCopy(psServerCookie->paui32UpdateValue,
+ paui32UpdateValue,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+ /*
+ For server syncs we just take an operation
+ */
+ for (i=0;i<ui32ServerSyncCount;i++)
+ {
+ /*
+ A take can only queue one operation at a time, so we can't
+ optimise away fences; just report the requester as unknown
+ */
+ PVRSRVServerSyncQueueSWOpKM(psServerCookie->papsServerSync[i],
+ &psServerCookie->paui32ServerFenceValue[i],
+ &psServerCookie->paui32ServerUpdateValue[i],
+ SYNC_REQUESTOR_UNKNOWN,
+ (paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE) ? IMG_TRUE:IMG_FALSE,
+ NULL);
+ }
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_TAKE, psServerCookie,
+ ui32ServerSyncCount, ui32ClientSyncCount);
+ psServerCookie->bActive = IMG_TRUE;
+ return PVRSRV_OK;
+}
+
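+/*!
+ @Function : PVRSRVSyncPrimOpReadyKM
+
+ @Description : Report whether the taken operation is ready to complete, i.e.
+ every checked client sync has reached its fence value and
+ every server sync fence is met. Fails if no take has been
+ performed on the cookie.
+*/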
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_BOOL *pbReady)
+{
+ IMG_UINT32 i;
+ IMG_BOOL bReady = IMG_TRUE;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!psServerCookie->bActive)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+ bReady = IMG_FALSE;
+ eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+ goto e0;
+ }
+
+ /* Check the client syncs */
+ for (i=0;i<psServerCookie->ui32ClientSyncCount;i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ if (psSyncBlock->pui32LinAddr[ui32Index] !=
+ psServerCookie->paui32FenceValue[i])
+ {
+ bReady = IMG_FALSE;
+ goto e0;
+ }
+ }
+ }
+
+ for (i=0;i<psServerCookie->ui32ServerSyncCount;i++)
+ {
+ bReady = ServerSyncFenceIsMet(psServerCookie->papsServerSync[i],
+ psServerCookie->paui32ServerFenceValue[i]);
+ if (!bReady)
+ {
+ break;
+ }
+ }
+
+e0:
+ *pbReady = bReady;
+ return eError;
+}
+
+static
+PVRSRV_ERROR _SyncPrimOpComplete(SERVER_OP_COOKIE *psServerCookie)
+{
+ RGX_HWPERF_UFO_DATA_ELEMENT asUFOData[PVRSRV_MAX_SYNC_PRIMS];
+ IMG_UINT32 i, ui32UFOIdx = 0;
+
+ for (i=0;i<psServerCookie->ui32ClientSyncCount;i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ asUFOData[ui32UFOIdx].sUpdate.ui32FWAddr = psSyncBlock->uiFWAddr.ui32Addr + ui32Index * sizeof(IMG_UINT32);
+ asUFOData[ui32UFOIdx].sUpdate.ui32OldValue = psSyncBlock->pui32LinAddr[ui32Index];
+ asUFOData[ui32UFOIdx].sUpdate.ui32NewValue = psServerCookie->paui32UpdateValue[i];
+ ui32UFOIdx++;
+
+ psSyncBlock->pui32LinAddr[ui32Index] = psServerCookie->paui32UpdateValue[i];
+ }
+ }
+
+ for (i=0;i<psServerCookie->ui32ServerSyncCount;i++)
+ {
+ IMG_BOOL bUpdate = psServerCookie->paui32ServerFenceValue[i] != psServerCookie->paui32ServerUpdateValue[i];
+
+ if (bUpdate)
+ {
+ IMG_UINT32 ui32SyncAddr;
+
+ (void)ServerSyncGetFWAddr(psServerCookie->papsServerSync[i], &ui32SyncAddr);
+ asUFOData[ui32UFOIdx].sUpdate.ui32FWAddr = ui32SyncAddr;
+ asUFOData[ui32UFOIdx].sUpdate.ui32OldValue = ServerSyncGetValue(psServerCookie->papsServerSync[i]);
+ asUFOData[ui32UFOIdx].sUpdate.ui32NewValue = psServerCookie->paui32ServerUpdateValue[i];
+ ui32UFOIdx++;
+ }
+
+ ServerSyncCompleteOp(psServerCookie->papsServerSync[i],
+ bUpdate,
+ psServerCookie->paui32ServerUpdateValue[i]);
+ }
+
+ if (ui32UFOIdx > 0)
+ {
+ RGX_HWPERF_HOST_UFO(RGX_HWPERF_UFO_EV_UPDATE, asUFOData, ui32UFOIdx);
+ }
+
+ psServerCookie->bActive = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie)
+{
+ IMG_BOOL bReady;
+
+ PVRSRVSyncPrimOpReadyKM(psServerCookie, &bReady);
+
+ /* Check the client is playing ball */
+ if (!bReady)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync op still not ready", __FUNCTION__));
+
+ return PVRSRV_ERROR_BAD_SYNC_STATE;
+ }
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_COMPLETE, psServerCookie);
+ return _SyncPrimOpComplete(psServerCookie);
+}
+
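+/*!
+ @Function : PVRSRVSyncPrimOpDestroyKM
+
+ @Description : Destroy a server operation cookie. If the operation is still
+ active an attempt is made to complete it first, returning
+ PVRSRV_ERROR_RETRY if it isn't ready. The references taken at
+ create time on the sync blocks and server syncs are dropped
+ and the cookie is freed.
+*/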
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie)
+{
+ IMG_UINT32 i;
+
+ /* If the operation is still active then check if it's finished yet */
+ if (psServerCookie->bActive)
+ {
+ if (PVRSRVSyncPrimOpCompleteKM(psServerCookie) == PVRSRV_ERROR_BAD_SYNC_STATE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Not ready, ask for retry", __FUNCTION__));
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+
+ /* Drop our references on the sync blocks and server syncs*/
+ for (i = 0; i < psServerCookie->ui32SyncBlockCount; i++)
+ {
+ _SyncPrimitiveBlockUnref(psServerCookie->papsSyncPrimBlock[i]);
+ }
+
+ for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ _ServerSyncUnref(psServerCookie->papsServerSync[i]);
+ }
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_DESTROY, psServerCookie);
+ OSFreeMem(psServerCookie);
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+ /*
+ We might be asked to PDump sync state outside of the capture range
+ (e.g. texture uploads), so make this continuous.
+ */
+ DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+ ui32Offset,
+ ui32Value,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+ /*
+ We might be asked to PDump sync state outside of the capture range
+ (e.g. texture uploads), so make this continuous.
+ */
+ DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+ ui32Offset,
+ sizeof(IMG_UINT32),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!psServerCookie->bActive)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+ eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+ goto e0;
+ }
+
+ /* PDump POL on the client syncs */
+ for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ PVRSRVSyncPrimPDumpPolKM(psSyncBlock,
+ ui32Index*sizeof(IMG_UINT32),
+ psServerCookie->paui32FenceValue[i],
+ 0xFFFFFFFFU,
+ eOperator,
+ ui32PDumpFlags);
+ }
+ }
+
+ /* PDump POL on the server syncs */
+ for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ SERVER_SYNC_PRIMITIVE *psServerSync = psServerCookie->papsServerSync[i];
+ IMG_UINT32 ui32FenceValue = psServerCookie->paui32ServerFenceValue[i];
+
+ SyncPrimPDumpPol(psServerSync->psSync,
+ ui32FenceValue,
+ 0xFFFFFFFFU,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+ }
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ DevmemPDumpCBP(psSyncBlk->psMemDesc,
+ ui32Offset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+ SYNC_CONNECTION_DATA *psSyncConnectionData;
+ PVRSRV_ERROR eError;
+
+ psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+ if (psSyncConnectionData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = OSLockCreate(&psSyncConnectionData->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lockcreate;
+ }
+ dllist_init(&psSyncConnectionData->sListHead);
+ psSyncConnectionData->ui32RefCount = 1;
+
+ *ppsSyncConnectionData = psSyncConnectionData;
+ return PVRSRV_OK;
+
+fail_lockcreate:
+ OSFreeMem(psSyncConnectionData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/* SyncUnregisterConnection */
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ _SyncConnectionUnref(psSyncConnectionData);
+}
+
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+
+ PDUMPCOMMENT("Dump client Sync Prim state");
+ dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext)
+ {
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock =
+ IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+
+ DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+ 0,
+ psSyncBlock->ui32BlockSize,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ OSLockRelease(psSyncConnectionData->hLock);
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
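+/*
+ Look up the sync record whose firmware address matches ui32FwAddr and
+ write a short description (current value, client/server type, PID and
+ class name annotation) into pszSyncInfo; the string is left empty if no
+ matching record is found.
+*/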
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_INT iEnd;
+
+ if (!pszSyncInfo)
+ {
+ return;
+ }
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ pszSyncInfo[0] = '\0';
+
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *psSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+ if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr
+ && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType
+ && psSyncRec->psServerSyncPrimBlock
+ && psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+ )
+ {
+ IMG_UINT32 *pui32SyncAddr;
+ pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+ iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)",
+ *pui32SyncAddr,
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ psSyncRec->szClassName
+ );
+ if (iEnd >= 0 && iEnd < len)
+ {
+ pszSyncInfo[iEnd] = '\0';
+ }
+ break;
+ }
+ }
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec,
+ IMG_UINT64 ui64TimeNow,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+
+ if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+ {
+ IMG_UINT64 ui64DeltaS;
+ IMG_UINT32 ui32DeltaF;
+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+ if (psSyncBlock && psSyncBlock->pui32LinAddr)
+ {
+ IMG_UINT32 *pui32SyncAddr;
+ pui32SyncAddr = psSyncBlock->pui32LinAddr
+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+
+ PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=0x%08x (%s)",
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+ *pui32SyncAddr,
+ psSyncRec->szClassName
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+ psSyncRec->szClassName
+ );
+ }
+ }
+}
+
+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ IMG_UINT64 ui64TimeNowS;
+ IMG_UINT32 ui32TimeNowF;
+ IMG_UINT64 ui64TimeNow = OSClockns64();
+ DLLIST_NODE *psNode, *psNext;
+
+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+ IMG_UINT32 i;
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+ PVR_DUMPDEBUG_LOG("Dumping all allocated syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *psSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+ _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+ i != psDevNode->uiSyncServerRecordFreeIdx;
+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+ {
+ if (psDevNode->apsSyncServerRecordsFreed[i])
+ {
+ _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i],
+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+ }
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&psDevNode->hSyncServerRecordLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psDevNode->sSyncServerRecordList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify,
+ psDevNode,
+ _SyncRecordRequest,
+ DEBUG_REQUEST_SERVERSYNC,
+ psDevNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+ return PVRSRV_OK;
+
+fail_dbg_register:
+ OSLockDestroy(psDevNode->hSyncServerRecordLock);
+fail_lock_create:
+ return eError;
+}
+
+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ DLLIST_NODE *psNode, *psNext;
+ int i;
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *pSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+ dllist_remove_node(psNode);
+ OSFreeMem(pSyncRec);
+ }
+
+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+ {
+ if (psDevNode->apsSyncServerRecordsFreed[i])
+ {
+ OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]);
+ psDevNode->apsSyncServerRecordsFreed[i] = NULL;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify);
+ OSLockDestroy(psDevNode->hSyncServerRecordLock);
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
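+/*
+ Per-device initialisation for server syncs: create the server sync list
+ and its lock, register the debug request notifier that dumps pending
+ server syncs and, when full sync tracking is enabled, initialise the
+ sync record list.
+*/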
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&psDevNode->hSyncServerListLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psDevNode->sSyncServerSyncsList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerNotify,
+ psDevNode,
+ _ServerSyncDebugRequest,
+ DEBUG_REQUEST_SERVERSYNC,
+ psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = SyncRecordListInit(psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_record_list;
+ }
+#endif
+
+ return PVRSRV_OK;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+fail_record_list:
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+#endif
+fail_dbg_register:
+ OSLockDestroy(psDevNode->hSyncServerListLock);
+fail_lock_create:
+ return eError;
+}
+
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+ psDevNode->hSyncServerNotify = NULL;
+
+ OSLockDestroy(psDevNode->hSyncServerListLock);
+ psDevNode->hSyncServerListLock = NULL;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ SyncRecordListDeinit(psDevNode);
+#endif
+}
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Server side synchronisation interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Describes the server side synchronisation functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SYNC_SERVER_H_
+#define _SYNC_SERVER_H_
+
+#include "img_types.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+typedef struct _SERVER_OP_COOKIE_ SERVER_OP_COOKIE;
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SERVER_SYNC_EXPORT_ SERVER_SYNC_EXPORT;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+typedef struct _SYNC_ADDR_LIST_
+{
+ IMG_UINT32 ui32NumSyncs;
+ PRGXFWIF_UFO_ADDR *pasFWAddrs;
+} SYNC_ADDR_LIST;
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 ui32Offset,
+ PRGXFWIF_UFO_ADDR *psAddrOut);
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList);
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumSyncs,
+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+ IMG_UINT32 *paui32SyncOffset);
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize,
+ PMR **ppsSyncPMR);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+ DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_SECURE_TYPE *phSecure,
+ SERVER_SYNC_EXPORT **ppsExport,
+ CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID);
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID);
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(IMG_UINT32 ui32FWAddr);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord);
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+ SERVER_SYNC_PRIMITIVE **papsSyncs,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue);
+
+IMG_BOOL
+ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 ui32FenceValue);
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bDoUpdate,
+ IMG_UINT32 ui32UpdateValue);
+
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32SyncBlockIndex,
+ IMG_UINT32 *paui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ SERVER_SYNC_PRIMITIVE **papsServerSync,
+ SERVER_OP_COOKIE **ppsServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32Flags,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie);
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr);
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR *pszSyncInfo, size_t len);
+#endif
+
+void ServerSyncDumpPending(void);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData);
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData);
+
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode);
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui64Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize);
+
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimOpPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psServerCookie);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui64Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui64Offset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /*_SYNC_SERVER_H_ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Server side internal synchronisation interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Describes the server side internal synchronisation functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SYNC_SERVER_INTERNAL_H_
+#define _SYNC_SERVER_INTERNAL_H_
+
+#include "img_types.h"
+
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+
+#endif /*_SYNC_SERVER_INTERNAL_H_ */
--- /dev/null
+/**************************************************************************/ /*!
+@File
+@Title Common System APIs and structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides common system-specific declarations and
+ macros that are supported by all systems
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__SYSCOMMON_H__)
+#define __SYSCOMMON_H__
+
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function SysDevInit
+@Description System specific device initialisation function.
+@Input pvOSDevice pointer to the OS device reference
+@Output ppsDevConfig returned device configuration info
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig);
+
+/**************************************************************************/ /*!
+@Function SysDevDeInit
+@Description System specific device deinitialisation function.
+@Input psDevConfig device configuration info of the device to be
+ deinitialised
+@Return None.
+*/ /***************************************************************************/
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/**************************************************************************/ /*!
+@Function SysDebugInfo
+@Description Dump system specific device debug information.
+@Input psDevConfig pointer to device configuration info
+@Input pfnDumpDebugPrintf the 'printf' function to be called to
+ display the debug info
+@Input pvDumpDebugFile optional file identifier to be passed to
+ the 'printf' function if required
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function SysInstallDeviceLISR
+@Description Installs the system Low-level Interrupt Service Routine (LISR)
+ which handles low-level processing of interrupts from the device
+ (GPU).
+ The LISR will be invoked when the device raises an interrupt. An
+ LISR may not be descheduled, so code which needs to do so should
+ be placed in an MISR.
+ The installed LISR will schedule any MISRs once it has completed
+ its interrupt processing, by calling OSScheduleMISR().
+@Input hSysData pointer to the system data of the device
+@Input ui32IRQ the IRQ on which the LISR is to be installed
+@Input pszName name of the module installing the LISR
+@Input pfnLISR pointer to the function to be installed as the
+ LISR
+@Input pvData private data provided to the LISR
+@Output phLISRData handle to the installed LISR (to be used for a
+ subsequent uninstall)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData);
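+
+/* Illustrative sketch (not part of this interface): the general shape of a
+ * LISR following the contract described above. The device-interrupt helpers
+ * and the hMISR handle below are hypothetical; only the PFN_LISR signature
+ * and the use of OSScheduleMISR() come from this API.
+ *
+ *   static IMG_BOOL ExampleLISR(void *pvData)
+ *   {
+ *       EXAMPLE_DEVICE_DATA *psDevData = pvData;
+ *
+ *       if (!ExampleDeviceInterruptPending(psDevData))
+ *           return IMG_FALSE;             // not our interrupt
+ *
+ *       ExampleDeviceClearInterrupt(psDevData);
+ *       OSScheduleMISR(psDevData->hMISR); // defer further work to the MISR
+ *       return IMG_TRUE;
+ *   }
+ */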
+
+/**************************************************************************/ /*!
+@Function SysUninstallDeviceLISR
+@Description Uninstalls the system Low-level Interrupt Service Routine (LISR)
+ which handles low-level processing of interrupts from the device
+ (GPU).
+@Input hLISRData handle of the LISR to be uninstalled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+#endif /* !defined(__SYSCOMMON_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Validation System APIs and structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+ needed for hardware validation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSVALIDATION_H__)
+#define __SYSVALIDATION_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "img_types.h"
+#include "rgxdefs_km.h"
+#include "virt_validation_defs.h"
+
+void SysSetOSidRegisters(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS],
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS]);
+void SysPrintAndResetFaultStatusRegister(void);
+
+#if defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(void);
+#endif
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#endif /* !defined(__SYSVALIDATION_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File tlclient.c
+@Title Services Transport Layer shared API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common API used in both clients and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via the PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on the
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * hide from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer is that some of the API functions make
+ * multiple bridge calls, and the logic that glues these together is common
+ * regardless of client location. Further, this layer has allowed the
+ * defensive parameter-checking code to move into the PVR API layer, where
+ * untrusted clients enter, giving a more efficient KM code path.
+ */
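+
+/* Illustrative sketch (not compiled): a typical consumer-side sequence built
+ * on this shared API. The connection handle hSrv (DIRECT_BRIDGE_HANDLE for
+ * callers inside the server driver) and the stream name "example_0" are
+ * hypothetical; error handling is abbreviated.
+ *
+ *   IMG_HANDLE hSD;
+ *   IMG_PBYTE  pbData;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   eError = TLClientOpenStream(hSrv, "example_0", 0, &hSD);
+ *   eError = TLClientAcquireData(hSrv, hSD, &pbData, &ui32Len);
+ *   // ... consume ui32Len bytes starting at pbData ...
+ *   eError = TLClientReleaseData(hSrv, hSD);
+ *   eError = TLClientCloseStream(hSrv, hSD);
+ */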
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "pvr_tlcommon.h"
+#include "client_pvrtl_bridge.h"
+#include "pvrsrv_tlcommon.h"
+
+/* Defines/Constants
+ */
+
+#define NO_ACQUIRE 0xffffffffU
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+ /* Handle on kernel-side stream descriptor*/
+ IMG_HANDLE hServerSD;
+
+ /* Stream data buffer variables */
+ DEVMEM_MEMDESC* psUMmemDesc;
+ IMG_PBYTE pBaseAddr;
+
+ /* Offset in bytes into the circular buffer; valid only after
+ * an Acquire call and undefined after a Release. */
+ IMG_UINT32 uiReadOffset;
+
+ /* Always a positive integer when the Acquire call returns and a release
+ * is outstanding. Undefined at all other times. */
+ IMG_UINT32 uiReadLen;
+
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE* phSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = NULL;
+ IMG_HANDLE hTLPMR;
+ IMG_HANDLE hTLImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ IMG_UINT32 ui32MemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(pszName);
+ PVR_ASSERT(phSD);
+ *phSD = NULL;
+
+ /* Allocate memory for the stream descriptor object, initialise with
+ * "no data read" yet. */
+ psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+ if (psSD == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF((PVR_DBG_ERROR, "TLClientOpenStream: failed to allocate the stream descriptor (%d)", eError));
+ goto e0;
+ }
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+ /* Send open stream request to kernel server to get stream handle and
+ * buffer cookie so we can get access to the buffer in this process. */
+ eError = BridgeTLOpenStream(hSrvHandle, pszName, ui32Mode,
+ &psSD->hServerSD, &hTLPMR);
+ if (eError != PVRSRV_OK)
+ {
+ if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+ (eError == PVRSRV_ERROR_TIMEOUT))
+ {
+ goto e1;
+ }
+ PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+ }
+
+ /* Convert server export cookie into a cookie for use by this client */
+ eError = DevmemMakeLocalImportHandle(hSrvHandle,
+ hTLPMR, &hTLImportHandle);
+ PVR_LOGG_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
+
+ ui32MemFlags |= (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO) ?
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0;
+ /* Now convert client cookie into a client handle on the buffer's
+ * physical memory region */
+ eError = DevmemLocalImport(hSrvHandle,
+ hTLImportHandle,
+ ui32MemFlags,
+ &psSD->psUMmemDesc,
+ &uiImportSize,
+ "TLBuffer");
+ PVR_LOGG_IF_ERROR(eError, "DevmemLocalImport", e3);
+
+ /* Now map the memory into the virtual address space of this process. */
+ eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **)
+ &psSD->pBaseAddr);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+ /* Ignore error, not much that can be done */
+ (void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+ hTLImportHandle);
+
+ /* Return client descriptor handle to caller */
+ *phSD = psSD;
+ return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+ DevmemFree(psSD->psUMmemDesc);
+e3:
+ (void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+ hTLImportHandle);
+/* Clean up post stream open */
+e2:
+ BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+
+/* Cleanup post allocation of the descriptor object */
+e1:
+ OSFreeMem(psSD);
+
+e0:
+ return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+
+ /* Check the caller provided connection is valid */
+ if (!psSD->hServerSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
+ return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ }
+
+ /* Check if acquire is outstanding, perform release if it is, ignore result
+ * as there is not much we can do if it is an error other than close */
+ if (psSD->uiReadLen != NO_ACQUIRE)
+ {
+ (void) BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+ psSD->uiReadOffset, psSD->uiReadLen);
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+ }
+
+ /* Clean up DevMem resources used for this stream in this client */
+ DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+ DevmemFree(psSD->psUMmemDesc);
+
+ /* Send close to server to clean up kernel mode resources for this
+ * handle and release the memory. */
+ eError = BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
+ /* Not much we can do with error, fall through to clean up
+ * return eError; */
+ }
+
+ OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+ OSFreeMem (psSD);
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound)
+{
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(pszNamePattern);
+ PVR_ASSERT(pui32NumFound);
+
+ return BridgeTLDiscoverStreams(hSrvHandle,
+ pszNamePattern,
+ *pui32NumFound,
+ pui32Streams,
+ pui32NumFound);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+ IMG_UINT32 ui32BufferOffset, ui32Dummy;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppui8Data);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+ &ui32BufferOffset, ui32Size, ui32Size,
+ &ui32Dummy);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+ IMG_UINT32 ui32BufferOffset;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppui8Data);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+ &ui32BufferOffset, ui32Size, ui32SizeMin,
+ pui32Available);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLCommitStream(hSrvHandle, psSD->hServerSD, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_PBYTE* ppPacketBuf,
+ IMG_UINT32* pui32BufLen)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppPacketBuf);
+ PVR_ASSERT(pui32BufLen);
+
+ /* Check Acquire has not been called twice in a row without a release */
+ if (psSD->uiReadOffset != NO_ACQUIRE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientAcquireData: acquire already outstanding"));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ *pui32BufLen = 0;
+ /* Ask the kernel server for the next chunk of data to read */
+ eError = BridgeTLAcquireData(hSrvHandle, psSD->hServerSD,
+ &psSD->uiReadOffset, &psSD->uiReadLen);
+ if (eError != PVRSRV_OK)
+ {
+ if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+ (eError != PVRSRV_ERROR_TIMEOUT))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLAcquireData: KM returned %d", eError));
+ }
+ psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+ return eError;
+ }
+
+ /* Return the data offset and length to the caller if bytes are available
+ * to be read. Could be zero for non-blocking mode. */
+ if (psSD->uiReadLen)
+ {
+ *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+ *pui32BufLen = psSD->uiReadLen;
+ }
+ else
+ {
+ /* On a non-blocking call, zero-length data may be returned by the server,
+ * which is effectively a no-acquire operation. */
+ *ppPacketBuf = NULL;
+ *pui32BufLen = 0;
+ }
+
+ return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+
+ /* the previous acquire did not return any data, this is a no-operation */
+ if (psSD->uiReadLen == 0)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Check release has not been called twice in a row without an acquire */
+ if (psSD->uiReadOffset == NO_ACQUIRE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData: no acquire to release"));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Inform the kernel to release the data from the buffer */
+ eError = BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+ psSD->uiReadOffset, psSD->uiReadLen);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLReleaseData: KM returned %d", eError));
+ /* Need to continue to keep client data consistent, fall through
+ * return eError */
+ }
+
+ /* Reset state to indicate no outstanding acquire */
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ui32Size);
+ PVR_ASSERT(pui8Data);
+
+ eError = BridgeTLWriteData(hSrvHandle, psSD->hServerSD, ui32Size, pui8Data);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+ {
+ static IMG_BOOL bPrinted = IMG_FALSE;
+
+ if (!bPrinted)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not enough space. Failed to write"
+ " data to the stream (%d).", eError));
+ bPrinted = IMG_TRUE;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientWriteData: KM returned %d",
+ eError));
+ }
+ }
+
+ return eError;
+}
+
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File tlclient.h
+@Title Services Transport Layer shared API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common API used in both clients and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TLCLIENT_H_
+#define TLCLIENT_H_
+
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+
+/* This value is used for the hSrvHandle argument in the client API when it is
+ * called directly from the kernel, which leads to direct bridge access.
+ */
+#define DIRECT_BRIDGE_HANDLE ((IMG_HANDLE)0xDEADBEEFU)
+
+
+/**************************************************************************/ /*!
+ @Function TLClientOpenStream
+ @Description Open a descriptor onto an existing kernel transport stream.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input pszName Address of the stream name string, no longer
+ than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input ui32Mode Stream open mode flags (PVRSRV_STREAM_FLAG_*)
+ @Output phSD Address of a pointer to a stream object
+ @Return PVRSRV_ERROR_NOT_FOUND: when named stream not found
+ @Return PVRSRV_ERROR_ALREADY_OPEN: stream already open by another client
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_TIMEOUT: block timed out, stream not found
+ @Return PVRSRV_ERROR: for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE* phSD);
+
+
+/**************************************************************************/ /*!
+ @Function TLClientCloseStream
+ @Description Close and release the stream connection to Services kernel
+ server transport layer. Any outstanding Acquire will be
+ released.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to close
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR: for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function TLClientDiscoverStreams
+ @Description Finds all streams whose names start with pszNamePattern and
+ end with a number.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input pszNamePattern Name pattern that the start of a stream name
+ must match.
+ @Output pui32Streams Array of the numbers from the end of the
+ discovered stream names.
+ @InOut pui32NumFound On input, the maximum number of entries that fit
+ into pui32Streams; on output, the number of
+ streams discovered.
+ @Return PVRSRV_ERROR for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound);
+
+/**************************************************************************/ /*!
+ @Function TLClientReserveStream
+ @Description Reserves a region of the given size in the stream. If a
+ reserve is already outstanding the function returns an error.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to reserve space in
+ @Output ppui8Data Pointer to the start of the reserved region
+ @Input ui32Size Size of the data
+ @Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+ @Function TLClientReserveStream2
+ @Description Reserves a region of the given size in the stream. If a
+ reserve is already outstanding the function returns an error.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to reserve space in
+ @Output ppui8Data Pointer to the start of the reserved region
+ @Input ui32Size Size of the data
+ @Input ui32SizeMin Minimum size of the data
+ @Output pui32Available Available space in the buffer
+ @Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available);
+
+/**************************************************************************/ /*!
+ @Function TLClientCommitStream
+ @Description Commits a previously reserved region in the stream and
+ therefore allows subsequent reserves.
+ This call has to be preceded by a call to
+ TLClientReserveStream or TLClientReserveStream2.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to commit to
+ @Input ui32Size Size of the data
+ @Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size);
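+
+/* Illustrative sketch (not compiled): the reserve/commit pairing described
+ * above, as used by an in-kernel writer. hSrv, hSD, pvPayload and
+ * ui32PayloadSize are hypothetical, and the copy helper is assumed; only the
+ * TLClientReserveStream()/TLClientCommitStream() calls come from this API.
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *
+ *   eError = TLClientReserveStream(hSrv, hSD, &pui8Dest, ui32PayloadSize);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       OSCachedMemCopy(pui8Dest, pvPayload, ui32PayloadSize); // assumed helper
+ *       eError = TLClientCommitStream(hSrv, hSD, ui32PayloadSize);
+ *   }
+ */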
+
+/**************************************************************************/ /*!
+ @Function TLClientAcquireData
+ @Description When there is data available in the stream buffer this call
+ returns with the address and length of the data buffer the
+ client can safely read. This buffer may contain one or more
+ packets of data.
+ If no data is available then this call blocks until it becomes
+ available. However if the stream has been destroyed while
+ waiting then a resource unavailable error will be returned
+ to the caller. Clients must pair this call with a
+ ReleaseData call.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to read
+ @Output ppPacketBuf Address of a pointer to a byte buffer. On exit the
+ pointer contains the address of the buffer to read from
+ @Output puiBufLen Pointer to an integer. On exit it is the size
+ of the data to read from the packet buffer
+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle not known
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_RETRY: release not called beforehand
+ @Return PVRSRV_ERROR_TIMEOUT: block timed out, no data
+ @Return PVRSRV_ERROR: for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_PBYTE* ppPacketBuf,
+ IMG_UINT32* puiBufLen);
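+
+/* Illustrative sketch (not compiled): a consumer loop built on the
+ * acquire/release pairing described above. hSrv and hSD are handles assumed
+ * to have been obtained from TLClientOpenStream(); ProcessPackets() is a
+ * hypothetical stand-in for whatever consumes the packet data.
+ *
+ *   for (;;)
+ *   {
+ *       IMG_PBYTE  pbBuf;
+ *       IMG_UINT32 ui32Len;
+ *
+ *       eError = TLClientAcquireData(hSrv, hSD, &pbBuf, &ui32Len);
+ *       if (eError == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ *           break;                          // stream has gone away
+ *       if (eError != PVRSRV_OK)
+ *           continue;                       // e.g. timeout; try again
+ *
+ *       if (ui32Len != 0)
+ *           ProcessPackets(pbBuf, ui32Len);
+ *
+ *       eError = TLClientReleaseData(hSrv, hSD);
+ *   }
+ */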
+
+
+/**************************************************************************/ /*!
+ @Function TLClientReleaseData
+ @Description Called after the client has read the stream data out of the
+ buffer. The data is subsequently flushed from the stream buffer
+ to make room for more data packets from the stream source.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to read
+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle not known to TL
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_RETRY: acquire not called beforehand
+ @Return PVRSRV_ERROR: for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function TLClientWriteData
+ @Description Writes data to the stream.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to write to
+ @Input ui32Size Size of the data
+ @Input pui8Data Pointer to the data
+ @Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data);
+
+
+#endif /* TLCLIENT_H_ */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer functions available to driver components in
+ the driver.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "devicemem.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+ PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+ if (ps == NULL)
+ {
+ return NULL;
+ }
+ ps->psNode = f1;
+ ps->ui32Flags = f2;
+ ps->hReadEvent = f3;
+ ps->uiRefCount = 1;
+ return ps;
+}
+
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+ PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+ if (ps == NULL)
+ {
+ return NULL;
+ }
+ ps->hReadEventObj = f2;
+ ps->psStream = f3;
+ ps->psRDesc = f4;
+ f3->psNode = ps;
+ return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA sTLGlobalData = { 0 };
+
+TL_GLOBAL_DATA *TLGGD(void) // TLGetGlobalData()
+{
+ return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation for one device.
+ * If it is called again while the Transport Layer is already initialised,
+ * an error is returned.
+ */
+PVRSRV_ERROR
+TLInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDevNode);
+
+ /*
+ * The Transport Layer is designed to work in a single device system but
+ * this function will be called multiple times in a multi-device system.
+ * Return an error in this case.
+ */
+ if (sTLGlobalData.psRgxDevNode)
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ /* Store the RGX device node for later use in devmem buffer allocations */
+ sTLGlobalData.psRgxDevNode = (void*)psDevNode;
+
+ /* Allocate a lock for TL global data, to be used while updating the TL data.
+ * This is for making TL global data multi-thread safe */
+ eError = OSLockCreate (&sTLGlobalData.hTLGDLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Allocate the event object used to signal global TL events such as
+ * new stream created */
+ eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+ OSLockDestroy (sTLGlobalData.hTLGDLock);
+ sTLGlobalData.hTLGDLock = NULL;
+e0:
+ PVR_DPF_RETURN_RC (eError);
+}
+
+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE* last;
+ PTL_SNODE psn;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ // Unlink the stream node from the master list
+ PVR_ASSERT(psGD->psHead);
+ last = &psGD->psHead;
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn == psRemove)
+ {
+ /* Other calling code may have freed and zero'd the pointers */
+ if (psn->psRDesc)
+ {
+ OSFreeMem(psn->psRDesc);
+ psn->psRDesc = NULL;
+ }
+ if (psn->psStream)
+ {
+ OSFreeMem(psn->psStream);
+ psn->psStream = NULL;
+ }
+ *last = psn->psNext;
+ break;
+ }
+ last = &psn->psNext;
+ }
+
+ // Release the event list object owned by the stream node
+ if (psRemove->hReadEventObj)
+ {
+ eError = OSEventObjectDestroy(psRemove->hReadEventObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+ psRemove->hReadEventObj = NULL;
+ }
+
+ // Release the memory of the stream node
+ OSFreeMem(psRemove);
+
+ PVR_DPF_RETURN;
+}
+
+void
+TLDeInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVR_DPF_ENTERED;
+
+ if (sTLGlobalData.psRgxDevNode != psDevNode)
+ {
+ PVR_DPF_RETURN;
+ }
+
+ if (sTLGlobalData.uiClientCnt)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt));
+ sTLGlobalData.uiClientCnt = 0;
+ }
+
+ /* Clean up the SNODE list */
+ if (sTLGlobalData.psHead)
+ {
+ while (sTLGlobalData.psHead)
+ {
+ RemoveAndFreeStreamNode(sTLGlobalData.psHead);
+ }
+ /* Leave psHead NULL on loop exit */
+ }
+
+ /* Clean up the TL global event object */
+ if (sTLGlobalData.hTLEventObj)
+ {
+ OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+ sTLGlobalData.hTLEventObj = NULL;
+ }
+
+ /* Destroy the TL global data lock */
+ if (sTLGlobalData.hTLGDLock)
+ {
+ OSLockDestroy (sTLGlobalData.hTLGDLock);
+ sTLGlobalData.hTLGDLock = NULL;
+ }
+
+ sTLGlobalData.psRgxDevNode = NULL;
+
+ PVR_DPF_RETURN;
+}
+
+PVRSRV_DEVICE_NODE*
+TLGetGlobalRgxDevice(void)
+{
+ PVRSRV_DEVICE_NODE *p = (PVRSRV_DEVICE_NODE*)(TLGGD()->psRgxDevNode);
+ if (!p)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLGetGlobalRgxDevice() NULL node ptr, TL " \
+ "can not be used when no RGX device has been found"));
+ PVR_ASSERT(p);
+ }
+ return p;
+}
+
+void TLAddStreamNode(PTL_SNODE psAdd)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psAdd);
+ psAdd->psNext = TLGGD()->psHead;
+ TLGGD()->psHead = psAdd;
+
+ PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(pszName);
+
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn->psStream && OSStringCompare(psn->psStream->szName, pszName)==0)
+ {
+ PVR_DPF_RETURN_VAL(psn);
+ }
+ }
+
+ PVR_DPF_RETURN_VAL(NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDesc);
+
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn->psRDesc == psDesc || psn->psWDesc == psDesc)
+ {
+ PVR_DPF_RETURN_VAL(psn);
+ }
+ }
+ PVR_DPF_RETURN_VAL(NULL);
+}
+
+static inline IMG_BOOL IsDigit(IMG_CHAR c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
+ IMG_UINT32 *pui32Number)
+{
+ IMG_CHAR acTmp[11] = {0}; // max 10 digits
+ IMG_UINT32 ui32Result;
+ IMG_UINT i;
+
+ for (i = 0; i < sizeof(acTmp) - 1; i++)
+ {
+ if (!IsDigit(*pszBuffer))
+ break;
+ acTmp[i] = *pszBuffer++;
+ }
+
+ /* if there are no digits or there is something after the number */
+ if (i == 0 || *pszBuffer != '\0')
+ return IMG_FALSE;
+
+ if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
+ return IMG_FALSE;
+
+ *pui32Number = ui32Result;
+
+ return IMG_TRUE;
+}
+
+/**
+ * Matches pszNamePattern against pszName and stores the result in pui32Number.
+ *
+ * @Input pszNamePattern the beginning part of the name string, which should
+ * be followed by a number.
+ * @Input pszName name of the stream
+ * @Output pui32Number will contain the number from the end of the stream's
+ * name, e.g. 1234 for the name abc_1234
+ * @Return IMG_TRUE when the name matches the pattern, IMG_FALSE otherwise
+ */
+static IMG_BOOL MatchNamePattern(const IMG_CHAR *pszNamePattern,
+ const IMG_CHAR *pszName,
+ IMG_UINT32 *pui32Number)
+{
+ IMG_UINT uiPatternLen;
+
+ uiPatternLen = OSStringLength(pszNamePattern);
+
+ if (OSStringNCompare(pszNamePattern, pszName, uiPatternLen) != 0)
+ return IMG_FALSE;
+
+ return ReadNumber(pszName + uiPatternLen, pui32Number);
+}
+
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 ui32Max)
+{
+ TL_GLOBAL_DATA *psGD = TLGGD();
+ PTL_SNODE psn;
+ IMG_UINT32 ui32Count = 0;
+
+ PVR_ASSERT(pszNamePattern);
+
+ for (psn = psGD->psHead; psn; psn = psn->psNext)
+ {
+ IMG_UINT32 ui32Number = 0;
+
+ if (!MatchNamePattern(pszNamePattern, psn->psStream->szName,
+ &ui32Number))
+ continue;
+
+ if (pui32Streams != NULL)
+ {
+ if (ui32Count >= ui32Max)
+ break;
+
+ pui32Streams[ui32Count] = ui32Number;
+ }
+
+ ui32Count++;
+ }
+
+ return ui32Count;
+}
+
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ psn = TLFindStreamNodeByDesc(psDesc);
+ if (psn == NULL)
+ PVR_DPF_RETURN_VAL(NULL);
+
+ PVR_ASSERT(psDesc == psn->psWDesc);
+
+ psn->uiWRefCount++;
+ psDesc->uiRefCount++;
+
+ PVR_DPF_RETURN_VAL(psn);
+}
+
+void TLReturnStreamNode(PTL_SNODE psNode)
+{
+ psNode->uiWRefCount--;
+ psNode->psWDesc->uiRefCount--;
+
+ PVR_ASSERT(psNode->uiWRefCount > 0);
+ PVR_ASSERT(psNode->psWDesc->uiRefCount > 0);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRemove);
+
+ /* If there is a client connected to this stream, defer stream's deletion */
+ if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL)
+ {
+ PVR_DPF_RETURN_VAL (IMG_FALSE);
+ }
+
+ /* Remove stream from TL_GLOBAL_DATA's list and free stream node */
+ psRemove->psStream = NULL;
+ RemoveAndFreeStreamNode(psRemove);
+
+ PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove,
+ PTL_STREAM_DESC psSD)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psNodeToRemove);
+ PVR_ASSERT(psSD);
+
+ /* Decrement the reference count. For a descriptor obtained by a reader it
+ * must reach 0 (only a single reader is allowed); for descriptors obtained
+ * by writers it must reach a value greater than or equal to 0 (multiple
+ * writers model). */
+ psSD->uiRefCount--;
+
+ if (psSD == psNodeToRemove->psRDesc)
+ {
+ PVR_ASSERT(0 == psSD->uiRefCount);
+ /* Remove stream descriptor (i.e. stream reader context) */
+ psNodeToRemove->psRDesc = NULL;
+ }
+ else if (psSD == psNodeToRemove->psWDesc)
+ {
+ PVR_ASSERT(0 <= psSD->uiRefCount);
+
+ psNodeToRemove->uiWRefCount--;
+
+ /* Remove stream descriptor if reference == 0 */
+ if (0 == psSD->uiRefCount)
+ {
+ psNodeToRemove->psWDesc = NULL;
+ }
+ }
+
+ /* Do not Free Stream Node if there is a write reference (a producer
+ * context) to the stream */
+ if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc ||
+ 0 != psNodeToRemove->uiWRefCount)
+ {
+ PVR_DPF_RETURN_VAL (IMG_FALSE);
+ }
+
+ /* Make the stream pointer NULL to prevent it from being destroyed in
+ * RemoveAndFreeStreamNode. Cleanup of the stream should be done by the
+ * calling context. */
+ psNodeToRemove->psStream = NULL;
+ RemoveAndFreeStreamNode(psNodeToRemove);
+
+ PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer internals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer header used by TL internally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLINTERN_H__
+#define __TLINTERN_H__
+
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+#include "lock.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/*! TL stream structure container.
+ * pbyBuffer holds the circular buffer.
+ * ui32Read points to the beginning of the buffer, i.e. to where the data
+ * to be read begins.
+ * ui32Write points to the end of the data that has been committed, i.e.
+ * this is where new data will be written.
+ * ui32Pending number of bytes reserved in the last reserve call which have
+ * not yet been submitted and are therefore not yet ready to
+ * be transported.
+ * hStreamLock - provides atomic protection for the ui32Pending & ui32Write
+ * members of the structure for when they are checked and/or
+ * updated in the context of a stream writer (producer)
+ * calling DoTLStreamReserve() & TLStreamCommit().
+ * - Reader context is not multi-threaded, only one client per
+ * stream is allowed. Also note the read context may be in an
+ * ISR which prevents a design where locks can be held in the
+ * AcquireData/ReleaseData() calls. Thus this lock only
+ * protects the stream members from simultaneous writers.
+ *
+ * ui32Read < ui32Write <= ui32Pending
+ * where < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_
+{
+ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */
+ IMG_BOOL bDrop; /*!< Flag: When buffer is full drop new data instead of
+ overwriting older data */
+ IMG_BOOL bBlock; /*!< Flag: When buffer is full reserve will block until there is
+ enough free space in the buffer to fulfil the request. */
+ IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream, block until the
+ stream is drained. */
+ IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers
+ that new data is available on every commit. Producers
+ using this flag will need to manually signal when
+ appropriate using the TLStreamSync() API */
+
+ void (*pfOnReaderOpenCallback)(void *); /*!< Optional on reader connect callback */
+ void *pvOnReaderOpenUserData; /*!< On reader connect user data */
+ void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+ void *pvProducerUserData; /*!< Producer callback user data */
+
+ volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */
+ volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be
+ copied to user space*/
+ IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see
+ * TL_BUFFER_UTILIZATION in tlstream.c */
+ IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */
+ IMG_UINT32 ui32Size; /*!< Buffer size */
+ IMG_BYTE *pbyBuffer; /*!< Actual data buffer */
+
+ PTL_SNODE psNode; /*!< Ptr to parent stream node */
+ DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */
+
+ IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */
+ IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */
+
+ POS_LOCK hStreamLock; /*!< Writers Lock for ui32Pending & ui32Write*/
+} TL_STREAM, *PTL_STREAM;
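+
+/* Illustrative sketch (not part of the driver): how the circular indices
+ * above translate into the amount of committed, readable data. The helper
+ * name is invented for this example only.
+ *
+ *     static IMG_UINT32 ExampleBytesReadable(const TL_STREAM *psS)
+ *     {
+ *         IMG_UINT32 uiRead  = psS->ui32Read;
+ *         IMG_UINT32 uiWrite = psS->ui32Write;
+ *
+ *         // Committed data may wrap around the end of pbyBuffer
+ *         return (uiWrite >= uiRead) ? (uiWrite - uiRead)
+ *                                    : (psS->ui32Size - uiRead + uiWrite);
+ *     }
+ */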
+
+/* There needs to be enough space reserved in the buffer for 2 minimal packets
+ * and it needs to be aligned the same way the buffer is, or there will be a
+ * compile error. */
+#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT)
+
+/* ensure the space reserved follows the buffer's alignment */
+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)),
+ "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/* Define the largest value that an unsigned integer whose size matches
+ * PVRSRVTL_PACKET_ALIGNMENT can hold */
+#define MAX_UINT 0xFFFFFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+ PTL_SNODE psNode; /*!< Ptr to parent stream node */
+ IMG_UINT32 ui32Flags;
+ IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */
+ IMG_INT uiRefCount; /*!< Reference count to the SD */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3);
+
+#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000
+#define TL_STREAM_FLAG_TEST 0x10000000
+#define TL_STREAM_FLAG_WRAPREAD 0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF
+
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+ struct _TL_SNODE_* psNext; /*!< Linked list next element */
+ IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */
+ PTL_STREAM psStream; /*!< TL Stream object */
+ IMG_INT uiWRefCount; /*!< Stream writer reference count */
+ PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */
+ PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ * sequence of operations on uiClientCnt, psHead list of SNODEs and
+ * the immediate members in a list element SNODE structure.
+ * - This larger scope of responsibility for this lock helps avoid
+ * the need for a lock in the SNODE structure.
+ * - Lock held in the client (reader) context when streams are
+ * opened/closed and in the server (writer) context when streams
+ * are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+ void *psRgxDevNode; /* Device node to use for buffer allocations */
+ IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */
+
+ IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections. */
+ PTL_SNODE psHead; /* List of TL streams and associated client handle */
+
+ POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
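+
+/* Illustrative locking pattern (sketch only, not part of the driver): any
+ * walk or update of the psHead SNODE list is expected to be bracketed by
+ * hTLGDLock. "example_stream" is an invented name.
+ *
+ *     OSLockAcquire(TLGGD()->hTLGDLock);
+ *     psNode = TLFindStreamNodeByName("example_stream");
+ *     if (psNode != NULL)
+ *     {
+ *         // inspect/update the SNODE while the global lock is held
+ *     }
+ *     OSLockRelease(TLGGD()->hTLGDLock);
+ */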
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(PVRSRV_DEVICE_NODE *psDevNode);
+void TLDeInit(PVRSRV_DEVICE_NODE *psDevNode);
+
+PVRSRV_DEVICE_NODE* TLGetGlobalRgxDevice(void);
+
+void TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 ui32Max);
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+void TLReturnStreamNode(PTL_SNODE psNode);
+
+/****************************************************************************************
+ Function Name : TLTryRemoveStreamAndFreeStreamNode
+
+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested
+ to be removed from TL_GLOBAL_DATA's list
+
+ Return Value : IMG_TRUE - If the stream was made NULL and this
+ TL_SNODE was removed from the
+ TL_GLOBAL_DATA's list
+
+ IMG_FALSE - If the stream wasn't made NULL as there
+ is a client connected to this stream
+
+ Description     : If no client is currently connected to this stream, this
+                   function removes the TL_SNODE from TL_GLOBAL_DATA's list.
+                   The caller is responsible for the cleanup of the TL_STREAM
+                   whose TL_SNODE may be removed.
+
+                   Otherwise, this function does nothing.
+*****************************************************************************************/
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/*****************************************************************************************
+ Function Name : TLUnrefDescAndTryFreeStreamNode
+
+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is
+ requested to be removed
+ : PTL_STREAM_DESC Pointer to the STREAM_DESC
+
+ Return Value : IMG_TRUE - If this TL_SNODE was removed from the
+ TL_GLOBAL_DATA's list
+
+ IMG_FALSE - Otherwise
+
+ Description     : This function removes the stream descriptor from this
+                   TL_SNODE and, if no writer (producer context) is currently
+                   bound to this stream, removes the TL_SNODE from
+                   TL_GLOBAL_DATA's list. The caller is responsible for the
+                   cleanup of the TL_STREAM whose TL_SNODE may be removed.
+******************************************************************************************/
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD);
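+
+/* Illustrative teardown pairing (sketch only, not part of the driver): how
+ * the helper above is expected to combine with TLStreamDestroy() on a close
+ * path, mirroring the pattern used by the server code.
+ *
+ *     OSLockAcquire(TLGGD()->hTLGDLock);
+ *     bDestroy = TLUnrefDescAndTryFreeStreamNode(psNode, psSD);
+ *     OSLockRelease(TLGGD()->hTLGDLock);
+ *     if (bDestroy)
+ *     {
+ *         TLStreamDestroy(psStream);   // psStream saved before the unref
+ *     }
+ */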
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, IMG_UINT32* puiReadOffset);
+void TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen);
+
+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream);
+IMG_BOOL TLStreamEOS(PTL_STREAM psStream);
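+
+/* Illustrative reader pattern (sketch only, not part of the driver): the
+ * expected pairing of the two read-position calls above. "ConsumeBytes" is
+ * an invented consumer.
+ *
+ *     IMG_UINT32 uiOff, uiLen;
+ *     while ((uiLen = TLStreamAcquireReadPos(psStream, &uiOff)) != 0)
+ *     {
+ *         ConsumeBytes(psStream->pbyBuffer + uiOff, uiLen);
+ *         TLStreamAdvanceReadPos(psStream, uiLen);
+ *     }
+ */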
+
+/****************************************************************************************
+ Function Name : TLStreamDestroy
+
+ Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed
+
+ Description : This function performs all the clean-up operations required for
+ destruction of this stream
+*****************************************************************************************/
+void TLStreamDestroy (PTL_STREAM);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit (PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR TUtilsDeinit (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* __TLINTERN_H__ */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title KM server Transport Layer implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main bridge APIs for Transport Layer client functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <stddef.h>
+
+#include "img_defs.h"
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+#include "tlserver.h"
+
+#define NO_STREAM_WAIT_PERIOD 2000000ULL
+#define NO_DATA_WAIT_PERIOD 1000000ULL
+#define NO_ACQUIRE 0xffffffffU
+
+#include "rgxhwperf.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerOpenStreamKM(const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ PTL_STREAM_DESC* ppsSD,
+ PMR** ppsTLPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eErrorEO = PVRSRV_OK;
+ PTL_SNODE psNode = 0;
+ TL_STREAM_DESC* psNewSD = 0;
+ IMG_HANDLE hEvent;
+ IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+ IMG_TRUE : IMG_FALSE;
+ PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+ PVR_ASSERT(pszName);
+
+	/* Acquire the TL_GLOBAL_DATA lock here: if the following TLFindStreamNodeByName
+	 * returns a non-NULL PTL_SNODE, we update the global data client count and the
+	 * PTL_SNODE's psRDesc, and we want to make sure the TL_SNODE is valid (e.g. has
+	 * not been deleted) while we are updating it.
+	 */
+ OSLockAcquire (psGD->hTLGDLock);
+
+ psNode = TLFindStreamNodeByName(pszName);
+ if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+ { /* Blocking code to wait for stream to be created if it does not exist */
+ eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+ PVR_LOGG_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+ do
+ {
+ if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+
+ /* Release TL_GLOBAL_DATA lock before sleeping */
+ OSLockRelease (psGD->hTLGDLock);
+
+ /* Will exit OK or with timeout, both cases safe to ignore */
+ eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD);
+
+ /* Acquire lock after waking up */
+ OSLockAcquire (psGD->hTLGDLock);
+ }
+ }
+ while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+ eError = OSEventObjectClose(hEvent);
+ PVR_LOGG_IF_ERROR (eError, "OSEventObjectClose", e0);
+ }
+
+ /* Make sure we have found a stream node after wait/search */
+ if (psNode == NULL)
+ {
+ /* Did we exit the wait with timeout, inform caller */
+ if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+ {
+ eError = eErrorEO;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_NOT_FOUND;
+ PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName));
+ }
+ goto e0;
+ }
+
+ /* Allocate memory for the stream. The memory will be allocated with the
+ * first call. */
+ eError = TLAllocSharedMemIfNull(psNode->psStream);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream"
+		         " \"%s\"", pszName));
+		/* Jump to e0 so that the TL_GLOBAL_DATA lock acquired above is released */
+		goto e0;
+ }
+
+ if (bIsWriteOnly)
+ {
+ /* If psWDesc == NULL it means that this is the first attempt
+ * to open stream for write. If yes create the descriptor or increment
+ * reference count otherwise. */
+ if (psNode->psWDesc == NULL)
+ {
+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL);
+ psNode->psWDesc = psNewSD;
+ }
+ else
+ {
+ psNewSD = psNode->psWDesc;
+ psNode->psWDesc->uiRefCount++;
+ }
+
+ if (!psNewSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream"
+ " writer descriptor"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ psNode->uiWRefCount++;
+ }
+ else
+ {
+ // Only one reader per stream supported
+ if (psNode->psRDesc != NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already"
+ " opened", pszName));
+ eError = PVRSRV_ERROR_ALREADY_OPEN;
+ goto e0;
+ }
+
+ // Create an event handle for this client to wait on when no data in
+ // stream buffer.
+ eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+ goto e0;
+ }
+
+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+ psNode->psRDesc = psNewSD;
+
+ if (!psNewSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "TLServerOpenStreamKM evList=%p, evObj=%p",
+ psNode->hReadEventObj,
+ psNode->psRDesc->hReadEvent));
+ }
+
+ // Copy the import handle back to the user mode API to enable access to
+ // the stream buffer from user-mode process.
+ eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psNode->psStream), (void**) ppsTLPMR);
+ PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2);
+
+ psGD->uiClientCnt++;
+
+ /* Global data updated. Now release global lock */
+ OSLockRelease (psGD->hTLGDLock);
+
+ *ppsSD = psNewSD;
+
+	/* This callback is executed only on reader open. Some actions performed on
+	 * reader open do not make much sense for writers, e.g. injection of a time
+	 * synchronisation packet into the stream. */
+ if (!bIsWriteOnly && psNode->psStream->pfOnReaderOpenCallback != NULL)
+ {
+ psNode->psStream->pfOnReaderOpenCallback(
+ psNode->psStream->pvOnReaderOpenUserData);
+ }
+
+ if (bIsWriteOnly)
+ {
+		/* Sending the HWPerf event from TL is a temporary solution and this
+		 * will change once TL is extended with an event allowing stream
+		 * opening to be signalled. */
+ RGX_HWPERF_HOST_CTRL(CLIENT_STREAM_OPEN,
+ OSGetCurrentClientProcessIDKM());
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName,
+ ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read"));
+
+ PVR_DPF_RETURN_OK;
+
+e2:
+ OSFreeMem(psNewSD);
+e1:
+ OSEventObjectClose(hEvent);
+e0:
+ OSLockRelease (psGD->hTLGDLock);
+ PVR_DPF_RETURN_RC (eError);
+}
+
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_GLOBAL_DATA psGD = TLGGD();
+ PTL_SNODE psNode = 0;
+ PTL_STREAM psStream;
+ IMG_BOOL bDestroyStream;
+ IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ?
+ IMG_TRUE : IMG_FALSE;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ // Check stream still valid
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since the descriptor is valid, the stream should not have been made NULL */
+ PVR_ASSERT (psNode->psStream);
+
+	/* Save the stream's reference in case its destruction is required after this
+	 * client is removed */
+ psStream = psNode->psStream;
+
+	/* Acquire the TL_GLOBAL_DATA lock as the following TLUnrefDescAndTryFreeStreamNode
+	 * call will update the TL_SNODE's descriptor value */
+ OSLockAcquire (psGD->hTLGDLock);
+
+ /* Close event handle because event object list might be destroyed in
+ * TLUnrefDescAndTryFreeStreamNode(). */
+ if (!bIsWriteOnly)
+ {
+ // Close and free the event handle resource used by this descriptor
+ eError = OSEventObjectClose(psSD->hReadEvent);
+ if (eError != PVRSRV_OK)
+ {
+ // Log error but continue as it seems best
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d",
+ eError));
+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+ }
+ else
+ {
+		/* Sending the HWPerf event from TL is a temporary solution and this
+		 * will change once TL is extended with an event allowing stream
+		 * closing to be signalled. */
+ RGX_HWPERF_HOST_CTRL(CLIENT_STREAM_CLOSE,
+ OSGetCurrentClientProcessIDKM());
+ }
+
+ // Remove descriptor from stream object/list
+ bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD);
+
+ // Assert the counter is sane after input data validated.
+ PVR_ASSERT(psGD->uiClientCnt > 0);
+ psGD->uiClientCnt--;
+
+ OSLockRelease (psGD->hTLGDLock);
+
+ /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+ if (bDestroyStream)
+ {
+ TLStreamDestroy (psStream);
+ psStream = NULL;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__));
+
+ /* Free the descriptor if ref count reaches 0. */
+ if (psSD->uiRefCount == 0)
+ {
+ // Free the stream descriptor object
+ OSFreeMem(psSD);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* ui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode = 0;
+ IMG_UINT8* pui8Buffer = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Acquire the global lock. We have to be sure that no one modifies
+ * the list while we are looking for our stream. */
+ OSLockAcquire(psGD->hTLGDLock);
+ // Check stream still valid
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size,
+ ui32SizeMin, pui32Available);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to reserve the stream (%d).", eError));
+ }
+ else if (pui8Buffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream."));
+ eError = PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG;
+ }
+ else
+ {
+ *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer;
+ PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size);
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Acquire the global lock. We have to be sure that no one modifies
+ * the list while we are looking for our stream. */
+ OSLockAcquire(psGD->hTLGDLock);
+ // Check stream still valid
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamCommit(psNode->psStream, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to commit data into stream."));
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 ui32Max,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound)
+{
+ if (*pszNamePattern == '\0')
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ // Sanity check, quick exit if there are no streams
+ if (TLGGD()->psHead == NULL)
+ {
+ *pui32NumFound = 0;
+ return PVRSRV_OK;
+ }
+
+ OSLockAcquire(TLGGD()->hTLGDLock);
+ *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, pui32Streams,
+ ui32Max);
+ OSLockRelease(TLGGD()->hTLGDLock);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* puiReadOffset,
+ IMG_UINT32* puiReadLen)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ IMG_UINT32 uiTmpOffset = NO_ACQUIRE;
+ IMG_UINT32 uiTmpLen = 0;
+ PTL_SNODE psNode = 0;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ // Check stream still valid
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+	/* If we are here, the stream will never be made NULL until this context itself
+	 * calls TLUnrefDescAndTryFreeStreamNode(). This is because the producer will
+	 * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode)
+	 * when a valid stream descriptor is present (i.e. a client is connected).
+	 * Hence, no checks for the stream being non-NULL are required after this. */
+ PVR_ASSERT (psNode->psStream);
+
+ //PVR_DPF((PVR_DBG_VERBOSE, "TLServerAcquireDataKM evList=%p, evObj=%p", psSD->psNode->hReadEventObj, psSD->hReadEvent));
+
+ /* Check for data in the associated stream buffer, sleep/wait if none */
+ while (((uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, &uiTmpOffset)) == 0) &&
+ (!(psSD->ui32Flags&PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) )
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM sleeping..."));
+
+ // Loop around if EndOfStream (nothing to read) and wait times out,
+ // exit loop if not time out but data is ready for client
+ while (TLStreamEOS(psNode->psStream))
+ {
+ eError = OSEventObjectWaitTimeout(psSD->hReadEvent, NO_DATA_WAIT_PERIOD);
+ if (eError != PVRSRV_OK)
+ {
+ /* Return timeout or other error condition to the caller who
+ * can choose to call again if desired. We don't block
+				 * indefinitely as we want the user mode application to have a
+ * chance to break out and end if it needs to, so we return the
+ * time out error code. */
+ PVR_DPF((PVR_DBG_VERBOSE, "TL Server timed out"));
+ PVR_DPF_RETURN_RC(eError);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "TL Server signalled"));
+ }
+ }
+ }
+
+	/* Data is available now if we reach here in blocking mode, or we take the
+	 * values as-is in non-blocking mode, which might be all zeros. */
+ *puiReadOffset = uiTmpOffset;
+ *puiReadLen = uiTmpLen;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM return offset=%d, len=%d bytes", *puiReadOffset, *puiReadLen));
+
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 uiReadOffset,
+ IMG_UINT32 uiReadLen)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode = 0;
+
+ PVR_DPF_ENTERED;
+
+ /* Unreferenced in release builds */
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+ PVR_ASSERT(psSD);
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ // Check stream still valid
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen));
+
+ // Move read position on to free up space in stream buffer
+ TLStreamAdvanceReadPos(psNode->psStream, uiReadLen);
+
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE* pui8Data)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ // Sanity check, quick exit if there are no streams
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ // Check stream still valid
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write data to the stream (%d).",
+ eError));
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************
+ End of file (tlserver.c)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title KM server Transport Layer implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main bridge APIs for Transport Layer client functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __TLSERVER_H_
+#define __TLSERVER_H_
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ PTL_STREAM_DESC* ppsSD,
+ PMR** ppsTLPMR);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 ui32Max,
+ IMG_UINT32 *pui32Streams,
+ IMG_UINT32 *pui32NumFound);
+
+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* ui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available);
+
+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* puiReadOffset,
+ IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 uiReadOffset,
+ IMG_UINT32 uiReadLen);
+
+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data);
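+
+/* Illustrative call sequence (sketch only, not part of the driver): a
+ * hypothetical kernel-side producer using the write-only bridge path above.
+ * Error handling is omitted and "EXAMPLE" is an invented stream name.
+ *
+ *     PTL_STREAM_DESC psSD;
+ *     PMR *psPMR;
+ *     IMG_UINT32 ui32Off, ui32Avail;
+ *
+ *     TLServerOpenStreamKM("EXAMPLE", PVRSRV_STREAM_FLAG_OPEN_WO,
+ *                          &psSD, &psPMR);
+ *     TLServerReserveStreamKM(psSD, &ui32Off, 16, 16, &ui32Avail);
+ *     // ... write 16 bytes at offset ui32Off in the shared stream buffer ...
+ *     TLServerCommitStreamKM(psSD, 16);
+ *     TLServerCloseStreamKM(psSD);
+ */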
+
+#endif /* __TLSERVER_H_ */
+
+/*****************************************************************************
+ End of file (tlserver.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer API implementation.
+ These functions are provided to driver components.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "log2.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+#include "tlstream.h"
+
+/* To debug buffer utilisation enable this macro here and
+ * define PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h. Issue pvrtutils 6 on target to see
+ * stream buffer utilisation. */
+//#define TL_BUFFER_UTILIZATION 1
+
+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL
+
+/* Given the state of the buffer, returns the number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+ IMG_UINT32 ui32LWrite,
+ IMG_UINT32 ui32CBSize,
+ IMG_UINT32 ui32ReqSizeMin)
+{
+ IMG_UINT32 ui32AvSpace = 0;
+
+ /* This could be written in fewer lines using the ? operator but it
+ would not be kind to potential readers of this source at all. */
+ if ( ui32LRead > ui32LWrite ) /* Buffer WRAPPED */
+ {
+ if ( (ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ }
+ else /* Normal, no wrap */
+ {
+ if ( (ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ else if ( (ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ }
+	/* The max size of a TL packet is currently limited to a UINT16; adjust accordingly. */
+ return MIN(ui32AvSpace, PVRSRVTL_MAX_PACKET_SIZE);
+}
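+
+/* Worked example (sketch, numbers invented): with ui32CBSize = 4096,
+ * ui32LRead = 100 and ui32LWrite = 3000 (no wrap), and assuming
+ * ui32ReqSizeMin is small enough for the first branch to be taken, the
+ * contiguous tail is 4096 - 3000 = 1096 bytes, so the function above suggests
+ * 1096 - sizeof(PVRSRVTL_PACKETHDR) - BUFFER_RESERVED_SPACE bytes,
+ * capped at PVRSRVTL_MAX_PACKET_SIZE. */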
+
+/* Returns the number of bytes left in the buffer; negative if there are none.
+ * Two 4-byte aligned values are reserved: one for the "write failed" buffer
+ * flag and one to distinguish the buffer-full state from the buffer-empty
+ * state.
+ * Always returns the free space minus 8, even when the "write failed" packet
+ * may already be in the stream before this write. */
+static INLINE IMG_INT
+cbSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* We need to reserve 4 bytes (one packet) in the buffer to be able to tell
+	 * empty buffers from full buffers, and one more for the "write failed" packet */
+ if ( ui32Read > ui32Write )
+ {
+ return (IMG_INT) ui32Read - (IMG_INT)ui32Write - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ else
+ {
+ return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+}
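+
+/* Worked example (sketch, numbers invented): with ui32size = 4096,
+ * ui32Read = 100 and ui32Write = 3000 the function above returns
+ * 4096 - (3000 - 100) - BUFFER_RESERVED_SPACE free bytes; once the writer
+ * wraps so that ui32Write = 50 (ui32Read still 100) the result becomes
+ * 100 - 50 - BUFFER_RESERVED_SPACE. As documented above, the result can be
+ * negative when the writer catches up with the reader. */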
+
+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+ PVRSRV_ERROR eError;
+
+ IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20];
+ DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ /* Exit if memory has already been allocated. */
+ if (psStream->pbyBuffer != NULL)
+ return PVRSRV_OK;
+
+ OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s",
+ psStream->szName);
+
+ eError = DevmemAllocateExportable((IMG_HANDLE) TLGetGlobalRgxDevice(),
+ (IMG_DEVMEM_SIZE_T) psStream->ui32Size,
+ (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+ ExactLog2(OSGetPageSize()),
+ uiMemFlags,
+ pszBufferLabel,
+ &psStream->psStreamMemDesc);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+ eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc,
+ (void**) &psStream->pbyBuffer);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1);
+
+ return PVRSRV_OK;
+
+e1:
+ DevmemFree(psStream->psStreamMemDesc);
+e0:
+ return eError;
+}
+
+void TLFreeSharedMem(IMG_HANDLE hStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+ if (psStream->pbyBuffer != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+ psStream->pbyBuffer = NULL;
+ }
+ if (psStream->psStreamMemDesc != NULL)
+ {
+ DevmemFree(psStream->psStreamMemDesc);
+ psStream->psStreamMemDesc = NULL;
+ }
+}
+
+/*******************************************************************************
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32StreamFlags,
+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+ void *pvOnRederOpenUD,
+ TL_STREAM_SOURCECB pfProducerCB,
+ void *pvProducerUD)
+{
+ PTL_STREAM psTmp;
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hEventList;
+ PTL_SNODE psn = 0;
+
+ PVR_DPF_ENTERED;
+ /* Sanity checks: */
+ /* non NULL handler required */
+ if ( NULL == phStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ if (szStreamName == NULL || *szStreamName == '\0' ||
+ OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+ * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Check if there already exists a stream with this name. */
+ psn = TLFindStreamNodeByName( szStreamName );
+ if ( NULL != psn )
+ {
+ eError = PVRSRV_ERROR_ALREADY_EXISTS;
+ goto e0;
+ }
+
+ /* Allocate stream structure container (stream struct) for the new stream */
+ psTmp = OSAllocZMem(sizeof(TL_STREAM)) ;
+ if ( NULL == psTmp )
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ OSStringCopy(psTmp->szName, szStreamName);
+
+ if ( ui32StreamFlags & TL_FLAG_FORCE_FLUSH )
+ {
+ psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+ }
+
+ psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE;
+
+ if ( ui32StreamFlags & TL_FLAG_RESERVE_DROP_NEWER )
+ {
+ if ( ui32StreamFlags & TL_FLAG_RESERVE_BLOCK )
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+ psTmp->bDrop = IMG_TRUE;
+ }
+ else if ( ui32StreamFlags & TL_FLAG_RESERVE_BLOCK )
+ { /* Additional synchronization object required for this kind of stream */
+ psTmp->bBlock = IMG_TRUE;
+ }
+
+ eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ /* Create an event handle for this kind of stream */
+ eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB;
+ psTmp->pvOnReaderOpenUserData = pvOnRederOpenUD;
+ /* Remember producer supplied CB and data for later */
+ psTmp->pfProducerCallback = (void(*)(void))pfProducerCB;
+ psTmp->pvProducerUserData = pvProducerUD;
+
+ /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+ psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+ psTmp->ui32Read = 0;
+ psTmp->ui32Write = 0;
+ psTmp->ui32Pending = NOTHING_PENDING;
+
+ /* Memory will be allocated on first connect to the stream */
+ if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN))
+ {
+ /* Allocate memory for the circular buffer and export it to user space. */
+ eError = TLAllocSharedMemIfNull(psTmp);
+ PVR_LOGG_IF_ERROR(eError, "TLAllocSharedMem", e3);
+ }
+
+ /* Synchronisation object to synchronise with user side data transfers. */
+ eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+ if (eError != PVRSRV_OK)
+ {
+ goto e4;
+ }
+
+ eError = OSLockCreate (&psTmp->hStreamLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e5;
+ }
+
+ /* Now remember the stream in the global TL structures */
+ psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, 0);
+ if (psn == NULL)
+ {
+ eError=PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e6;
+ }
+
+ /* Stream node created, now reset the write reference count to 1
+ * (i.e. this context's reference) */
+ psn->uiWRefCount = 1;
+
+ TLAddStreamNode(psn);
+
+ /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Best-effort signal: if this fails, the client wait timeout will ultimately
+	 * let it find the new stream. Acceptable to avoid cleanup here as it is
+	 * tricky at this point */
+ (void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+ /* Pass the newly created stream handle back to caller */
+ *phStream = (IMG_HANDLE)psTmp;
+ PVR_DPF_RETURN_OK;
+
+e6:
+ OSLockDestroy(psTmp->hStreamLock);
+e5:
+ OSEventObjectDestroy(hEventList);
+e4:
+ TLFreeSharedMem(psTmp);
+e3:
+ OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+ OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+ OSFreeMem(psTmp);
+e0:
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamReconfigure(
+ IMG_HANDLE hStream,
+ IMG_UINT32 ui32StreamFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_STREAM psTmp;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Prevent the TL Stream buffer from being written to
+ * while its mode is being reconfigured
+ */
+ OSLockAcquire (psTmp->hStreamLock);
+ if ( NOTHING_PENDING != psTmp->ui32Pending )
+ {
+ OSLockRelease (psTmp->hStreamLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+ }
+ psTmp->ui32Pending = 0;
+ OSLockRelease (psTmp->hStreamLock);
+
+ if ( ui32StreamFlags & TL_FLAG_RESERVE_DROP_NEWER )
+ {
+ if ( ui32StreamFlags & TL_FLAG_RESERVE_BLOCK )
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+ psTmp->bDrop = IMG_TRUE;
+ psTmp->bBlock = IMG_FALSE;
+ }
+ else if ( ui32StreamFlags & TL_FLAG_RESERVE_BLOCK )
+ {
+ psTmp->bBlock = IMG_TRUE;
+ psTmp->bDrop = IMG_FALSE;
+ }
+
+e1:
+ OSLockAcquire (psTmp->hStreamLock);
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName)
+{
+ PTL_SNODE psTmpSNode;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == phStream || NULL == szStreamName )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+	/* Acquire the TL_GLOBAL_DATA lock first to ensure that the TL_STREAM,
+	 * while being returned and modified, is not deleted by some other
+	 * context */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Search for a stream node with a matching stream name */
+ psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+ if ( NULL == psTmpSNode )
+ {
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+ }
+
+	/* The TL_SNODE->uiWRefCount governs the presence of this node in the
+	 * TL_GLOBAL_DATA list, i.e. when uiWRefCount falls to zero we try removing
+	 * this node from the TL_GLOBAL_DATA list. Hence, it is protected using the
+	 * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
+ psTmpSNode->uiWRefCount++;
+
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ /* Return the stream handle to the caller */
+ *phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+ PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+void
+TLStreamClose(IMG_HANDLE hStream)
+{
+ PTL_STREAM psTmp;
+ IMG_BOOL bDestroyStream;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+				 "TLStreamClose failed as a NULL stream handle was passed, nothing done."));
+ PVR_DPF_RETURN;
+ }
+
+ psTmp = (PTL_STREAM)hStream;
+
+	/* Acquire the TL_GLOBAL_DATA lock for updating the reference count, as this will
+	 * be required in case this TL_STREAM node is to be deleted */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Decrement write reference counter of the stream */
+ psTmp->psNode->uiWRefCount--;
+
+ if ( 0 != psTmp->psNode->uiWRefCount )
+ { /* The stream is still being used in other context(s) do not destroy anything */
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN;
+ }
+ else
+ {
+ /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+ if ( psTmp->bWaitForEmptyOnDestroy == IMG_TRUE )
+ {
+ /* We won't require the TL_STREAM lock to be acquired here for accessing its read
+ * and write offsets. REASON: We are here because there is no producer context
+ * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+			 * Also, the update of the ui32Read offset is not protected by locks */
+ while (psTmp->ui32Read != psTmp->ui32Write)
+ {
+ /* Release lock before sleeping */
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
+
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Ensure destruction of stream is still required */
+ if (0 != psTmp->psNode->uiWRefCount)
+ {
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN;
+ }
+ }
+ }
+
+ /* Try removing the stream from TL_GLOBAL_DATA */
+ bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ if (bDestroyStream)
+ {
+ /* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+ TLStreamDestroy (psTmp);
+ psTmp = NULL;
+ }
+ PVR_DPF_RETURN;
+ }
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32ReqSize,
+ IMG_UINT32 ui32ReqSizeMin,
+ PVRSRVTL_PACKETTYPE ePacketType,
+ IMG_UINT32* pui32AvSpace)
+{
+ PTL_STREAM psTmp;
+ IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual;
+ IMG_INT pad, iFreeSpace;
+
+ PVR_DPF_ENTERED;
+ if (pui32AvSpace) *pui32AvSpace = 0;
+
+ if (( NULL == hStream ))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Assert used as the packet type parameter is currently only provided
+ * by the TL APIs, not the calling client */
+ PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+ /* The buffer is only used in "rounded" (aligned) chunks */
+ lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Lock the stream before reading its pending value, because if pending is set
+ * to NOTHING_PENDING, we update the pending value such that subsequent calls to
+ * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+ OSLockAcquire (psTmp->hStreamLock);
+
+ /* Get a local copy of the stream buffer parameters */
+ ui32LRead = psTmp->ui32Read ;
+ ui32LWrite = psTmp->ui32Write ;
+ ui32LPending = psTmp->ui32Pending ;
+
+ /* Multiple pending reserves are not supported. */
+ if ( NOTHING_PENDING != ui32LPending )
+ {
+ OSLockRelease (psTmp->hStreamLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+ }
+
+ if ( PVRSRVTL_MAX_PACKET_SIZE < lReqSizeAligned )
+ {
+ psTmp->ui32Pending = NOTHING_PENDING;
+ if (pui32AvSpace)
+ {
+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin);
+ }
+ OSLockRelease (psTmp->hStreamLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+ }
+
+ /* Prevent other threads from entering this region before we are done updating
+	 * the pending value and write offset (in case of padding). This is not exactly
+ * a lock but a signal for other contexts that there is a TLStreamCommit operation
+ * pending on this stream */
+ psTmp->ui32Pending = 0;
+
+ OSLockRelease (psTmp->hStreamLock);
+
+ /* If there is enough contiguous space following the current Write
+ * position then no padding is required */
+ if ( psTmp->ui32Size
+ < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) )
+ {
+ pad = psTmp->ui32Size - ui32LWrite;
+ }
+ else
+ {
+ pad = 0 ;
+ }
+
+ lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad ;
+ /* If this is a blocking reserve and there is not enough space then wait. */
+ if( psTmp->bBlock )
+ {
+ if( psTmp->ui32Size < lReqSizeActual )
+ {
+ /* Acquire stream lock for updating pending value */
+ OSLockAcquire (psTmp->hStreamLock);
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamLock);
+
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+ }
+ while ( ( cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+ <(IMG_INT) lReqSizeActual ) )
+ {
+ /* The TL should never drop the Bridge Lock as this can lead to
+ * deadlocks. See comment for TLStreamReconfigure.
+ */
+ OSEventObjectWaitAndHoldBridgeLock(psTmp->hProducerEvent);
+ // update local copies.
+ ui32LRead = psTmp->ui32Read ;
+ ui32LWrite = psTmp->ui32Write ;
+ }
+ }
+
+ iFreeSpace = cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+
+ /* The easy case: buffer has enough space to hold the requested packet (data + header) */
+ if ( iFreeSpace >=(IMG_INT) lReqSizeActual )
+ {
+ if ( pad )
+ {
+ /* Inserting padding packet. */
+ pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+ *pui32Buf = PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR)) ;
+
+ /* CAUTION: the used pad value should always result in a properly
+ * aligned ui32LWrite pointer, which in this case is 0 */
+ ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+ /* Detect unaligned pad value */
+ PVR_ASSERT( ui32LWrite == 0);
+ }
+ /* Insert size-stamped packet header */
+ pui32Buf = (IMG_UINT32*) &psTmp->pbyBuffer[ui32LWrite];
+
+ *pui32Buf = PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType);
+
+ /* return the next position in the buffer to the user */
+ *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ] ;
+
+ /* update pending offset: size stamp + data */
+ ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ;
+ }
+ /* The not so easy case: not enough space, decide how to handle data */
+ else
+ {
+
+#if defined(DEBUG)
+ /* Sanity check that the user is not trying to add more data than the
+ * buffer size. Conditionally compile it out to ensure this check has
+ * no impact to release performance */
+ if ( lReqSizeAligned+sizeof(PVRSRVTL_PACKETHDR) > psTmp->ui32Size )
+ {
+ OSLockAcquire (psTmp->hStreamLock);
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+ }
+#endif
+
+ /* No data overwriting, insert write_failed flag and return */
+ if (psTmp->bDrop)
+ {
+			/* Caller should not try to use ppui8Data; NULL the caller's pointer
+			 * to give the user a chance of avoiding memory corruption */
+			*ppui8Data = NULL;
+
+ /* This flag should not be inserted two consecutive times, so
+ * check the last ui32 in case it was a packet drop packet. */
+ pui32Buf = ui32LWrite
+ ?
+ (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)]
+ : // Previous four bytes are not guaranteed to be a packet header...
+ (IMG_UINT32*)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+ if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED
+ !=
+ GET_PACKET_TYPE( (PVRSRVTL_PACKETHDR*)pui32Buf ) )
+ {
+ /* Insert size-stamped packet header */
+ pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+ *pui32Buf = PVRSRVTL_SET_PACKET_WRITE_FAILED ;
+ ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+ ui32LWrite %= psTmp->ui32Size;
+ iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+ }
+
+ OSLockAcquire (psTmp->hStreamLock);
+ psTmp->ui32Write = ui32LWrite;
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamLock);
+
+ if (pui32AvSpace)
+ {
+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin);
+ }
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+ }
+ }
+
+ /* Acquire stream lock for updating stream parameters */
+ OSLockAcquire (psTmp->hStreamLock);
+ psTmp->ui32Write = ui32LWrite ;
+ psTmp->ui32Pending = ui32LPending ;
+ OSLockRelease (psTmp->hStreamLock);
+
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size)
+{
+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available)
+{
+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available);
+}
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+ PTL_STREAM psTmp;
+ IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Get a local copy of the stream buffer parameters */
+ ui32LRead = psTmp->ui32Read ;
+ ui32LWrite = psTmp->ui32Write ;
+ ui32LPending = psTmp->ui32Pending ;
+
+ ui32OldWrite = ui32LWrite;
+
+ // Space in buffer is aligned
+ ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR);
+
+	/* Check the pending reserve and ReqSize + packet header size. */
+ if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+ }
+
+ /* Update pointer to written data. */
+ ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size;
+
+	/* and reset LPending to NOTHING_PENDING since the data are now submitted */
+ ui32LPending = NOTHING_PENDING;
+
+ /* Calculate high water mark for debug purposes */
+#if defined(TL_BUFFER_UTILIZATION)
+ {
+ IMG_UINT32 tmp = 0;
+ if (ui32LWrite > ui32LRead)
+ {
+ tmp = (ui32LWrite-ui32LRead);
+ }
+ else if (ui32LWrite < ui32LRead)
+ {
+ tmp = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+ } /* else equal, ignore */
+
+ if (tmp > psTmp->ui32BufferUt)
+ {
+ psTmp->ui32BufferUt = tmp;
+ }
+ }
+#endif
+
+ /* Acquire stream lock to ensure other context(s) (if any)
+ * wait on the lock (in DoTLStreamReserve) for consistent values
+ * of write offset and pending value */
+ OSLockAcquire (psTmp->hStreamLock);
+
+ /* Update stream buffer parameters to match local copies */
+ psTmp->ui32Write = ui32LWrite ;
+ psTmp->ui32Pending = ui32LPending ;
+
+ OSLockRelease (psTmp->hStreamLock);
+
+ /* If we have transitioned from an empty buffer to a non-empty buffer,
+ * signal any consumers that may be waiting */
+ if (ui32OldWrite == ui32LRead && !psTmp->bNoSignalOnCommit)
+ {
+ /* Signal consumers that may be waiting */
+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+ if ( eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ }
+
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+ IMG_BYTE *pbyDest = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ else if ( pbyDest )
+ {
+ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+ eError = TLStreamCommit(hStream, ui32Size);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ }
+ else
+ {
+ /* A NULL ptr returned from TLStreamReserve indicates the TL buffer is full */
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+ }
+ PVR_DPF_RETURN_OK;
+}
+
+void TLStreamInfo(PTL_STREAM_INFO psInfo)
+{
+ IMG_DEVMEM_SIZE_T actual_req_size;
+ IMG_DEVMEM_ALIGN_T align = 4; /* Low dummy value so the real value can be obtained */
+
+ actual_req_size = 2;
+ DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align);
+
+ psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+ psInfo->minReservationSize = sizeof(IMG_UINT32);
+ psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+ psInfo->pageAlign = (IMG_UINT32)(align);
+}
+
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT8* pData;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == psStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_STREAM psTmp;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == psStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)psStream;
+
+ /* If read client exists and has opened stream in blocking mode,
+ * signal when data is available to read. */
+ if (psTmp->psNode->psRDesc &&
+ (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) &&
+ psTmp->ui32Read != psTmp->ui32Write)
+ {
+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream, IMG_UINT32* puiReadOffset)
+{
+ IMG_UINT32 uiReadLen = 0;
+ IMG_UINT32 ui32LRead, ui32LWrite;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+ PVR_ASSERT(puiReadOffset);
+
+ /* Grab a local copy */
+ ui32LRead = psStream->ui32Read;
+ ui32LWrite = psStream->ui32Write;
+
+ /* No data available and CB defined - try and get data */
+ if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback)
+ {
+ PVRSRV_ERROR eRc;
+ IMG_UINT32 ui32Resp = 0;
+
+ eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+ &ui32Resp, psStream->pvProducerUserData);
+ PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+ ui32LWrite = psStream->ui32Write;
+ }
+
+ /* No data available... */
+ if (ui32LRead == ui32LWrite)
+ {
+ PVR_DPF_RETURN_VAL(0);
+ }
+
+ /* Data is available to read... */
+ *puiReadOffset = ui32LRead;
+
+ /*PVR_DPF((PVR_DBG_VERBOSE,
+ * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+ * ui32LWrite, ui32LRead, psStream->ui32Size));
+ */
+
+ if ( ui32LRead > ui32LWrite )
+ { /* CB has wrapped around.
+ * Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer]
+ * and let a subsequent AcquireReadPos read the rest of the Buffer */
+ /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+ uiReadLen = psStream->ui32Size - ui32LRead;
+ }
+ else
+ { /* CB has not wrapped */
+ uiReadLen = ui32LWrite - ui32LRead;
+ }
+
+ PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+void
+TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ /* Update the read offset by the length provided in a circular manner.
+ * The update is assumed to be atomic, hence no lock is taken. */
+ psStream->ui32Read = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+ /* notify reserves that may be pending */
+ /* The producer event object is used to signal the StreamReserve if the TL
+ * Buffer is in blocking mode and is full.
+ * Previously this event was only signalled if the buffer was created in
+ * blocking mode. Since the buffer mode can now change dynamically the event
+ * is signalled every time to avoid any potential race where the signal is
+ * required, but not produced.
+ */
+ {
+ PVRSRV_ERROR eError;
+ eError = OSEventObjectSignal(psStream->hProducerEventObj);
+ if ( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+ eError));
+ }
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "TLStreamAdvanceReadPos Read now at: %d",
+ psStream->ui32Read));
+ PVR_DPF_RETURN;
+}
+
+void
+TLStreamDestroy (PTL_STREAM psStream)
+{
+ PVR_ASSERT (psStream);
+
+ OSLockDestroy (psStream->hStreamLock);
+
+ OSEventObjectClose(psStream->hProducerEvent);
+ OSEventObjectDestroy(psStream->hProducerEventObj);
+
+ TLFreeSharedMem(psStream);
+ OSFreeMem(psStream);
+}
+
+DEVMEM_MEMDESC*
+TLStreamGetBufferPointer(PTL_STREAM psStream)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc);
+}
+
+IMG_BOOL
+TLStreamEOS(PTL_STREAM psStream)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ /* If both pointers are equal then the buffer is empty */
+ PVR_DPF_RETURN_VAL( psStream->ui32Read == psStream->ui32Write );
+}
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description TL provides driver components with a way to copy data from kernel
+ space to user space (e.g. screen/file).
+
+ Data can be passed to the Transport Layer through the
+ TL Stream (kernel space) API interface.
+
+ The buffer provided to every stream is a modified version of a
+ circular buffer. Which CB version is created is specified by
+ relevant flags when creating a stream. Currently two types
+ of buffer are available:
+ - TL_FLAG_RESERVE_DROP_NEWER:
+ When the buffer is full, incoming data are dropped
+ (instead of overwriting older data) and a marker is set
+ to let the user know that data have been lost.
+ - TL_FLAG_RESERVE_BLOCK:
+ When the circular buffer is full, reserve/write calls block
+ until enough space is freed.
+
+ All size/space requests are in bytes. However, the actual
+ implementation uses native word sizes (i.e. 4 byte aligned).
+
+ The user does not need to provide space for the stream buffer
+ as the TL handles memory allocations and usage.
+
+ Inserting data into a stream's buffer can be done either:
+ - by using TLReserve/TLCommit: User is provided with a buffer
+ to write data to.
+ - or by using TLWrite: User provides a buffer with
+ data to be committed. The TL
+ copies the data from the
+ buffer into the stream buffer
+ and returns.
+ Users should be aware that there are implementation overheads
+ associated with every stream buffer. If you find that less
+ data are captured than expected then try increasing the
+ stream buffer size or use TLStreamInfo to obtain buffer parameters
+ and calculate optimum required values at run time.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
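As a minimal illustration of the simpler of the two insertion paths described above, a producer that already holds a stream handle can push a record with a single TLStreamWrite call. This is only a sketch: the record layout and helper name are hypothetical, and errors are simply passed back to the caller.

/* Hedged sketch: "ExampleRecord" and "ExampleWriteRecord" are illustrative,
 * not part of the TL API. */
typedef struct { IMG_UINT32 ui32Id; IMG_UINT32 ui32Value; } ExampleRecord;

static PVRSRV_ERROR ExampleWriteRecord(IMG_HANDLE hStream,
                                       IMG_UINT32 ui32Id, IMG_UINT32 ui32Value)
{
	ExampleRecord sRec = { ui32Id, ui32Value };

	/* TLStreamWrite reserves space, copies the bytes and commits in one call. */
	return TLStreamWrite(hStream, (IMG_UINT8 *)&sRec, sizeof(sRec));
}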
+#ifndef __TLSTREAM_H__
+#define __TLSTREAM_H__
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*! Flags specifying stream and circular buffer behaviour */
+/*! Reject new data if the buffer is full, producer may then decide to
+ * drop the data or retry after some time. */
+#define TL_FLAG_RESERVE_DROP_NEWER (1U<<0)
+/*! Block Reserve (subsequently Write) calls if there is not enough space
+ * until some space is freed via a client read operation. */
+#define TL_FLAG_RESERVE_BLOCK (1U<<1)
+/*! When buffer is full, advance the tail/read position to accept the new
+ * reserve call (size permitting), effectively overwriting the oldest
+ * data in the circular buffer. Not supported yet. */
+#define TL_FLAG_RESERVE_DROP_OLDEST (1U<<2)
+
+/*! Do not destroy the stream if there are still data that have not been
+ * copied to user space. Block until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH (1U<<8)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. The producer is then responsible for
+ * signalling (see TLStreamSync) when it chooses. */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9)
+
+/*! Defer allocation of stream's shared memory until first open. */
+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<10)
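The flags above are combined with a bitwise OR when passed to TLStreamCreate or TLStreamReconfigure. As a purely illustrative combination (the macro name is made up), a low-priority debug stream might defer its shared-memory allocation and drop new data rather than block its producer:

/* Hypothetical flag choice for a stream whose producer must never block. */
#define EXAMPLE_DEBUG_STREAM_FLAGS \
	(TL_FLAG_RESERVE_DROP_NEWER | TL_FLAG_ALLOCATE_ON_FIRST_OPEN)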
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+ IMG_UINT32 headerSize; /*!< Packet header size in bytes */
+ IMG_UINT32 minReservationSize; /*!< Minimum data size reserved in bytes */
+ IMG_UINT32 pageSize; /*!< Page size in bytes */
+ IMG_UINT32 pageAlign; /*!< Page alignment in bytes */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream,
+ * can any more data be supplied?
+ * ui32Resp is ignored for this operation. */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream. Producer should handle the notification
+ * or operation supplied in ui32ReqOp on stream hStream. The
+ * operations and notifications are defined above (TL_SOURCECB_OP_*). */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
+
+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
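A producer callback matching the TL_STREAM_SOURCECB type might look like the sketch below. It only handles the CLIENT_EOS notification; the function name and the flush comment are illustrative, not taken from the driver.

/* Hedged sketch of a TL_STREAM_SOURCECB implementation. */
static PVRSRV_ERROR ExampleProducerCB(IMG_HANDLE hStream, IMG_UINT32 ui32ReqOp,
                                      IMG_UINT32 *pui32Resp, void *pvUser)
{
	(void)hStream;
	(void)pui32Resp; /* ui32Resp is ignored for CLIENT_EOS */
	(void)pvUser;

	if (ui32ReqOp == TL_SOURCECB_OP_CLIENT_EOS)
	{
		/* A real producer could flush any partially buffered data here. */
	}

	return PVRSRV_OK;
}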
+
+/*************************************************************************/ /*!
+ @Function TLAllocSharedMemIfNull
+ @Description Allocates shared memory for the stream if none has been
+ allocated yet.
+ @Input hStream Stream handle.
+ @Return eError Internal services call returned eError error
+ number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLAllocSharedMemIfNull(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLFreeSharedMem
+ @Description Frees the stream's shared memory.
+ @Input hStream Stream handle.
+*/ /**************************************************************************/
+void
+TLFreeSharedMem(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamCreate
+ @Description Request the creation of a new stream and open a handle.
+ If creating a stream which should continue to exist after the
+ current context is finished, then TLStreamCreate must be
+ followed by a TLStreamOpen call. In either case, the number of
+ create/open calls must balance with the number of close calls
+ used. This ensures the resources of a stream are released when
+ it is no longer required.
+ @Output phStream Pointer to handle to store the new stream.
+ @Input szStreamName Name of stream, maximum length:
+ PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ If a longer string is provided, creation fails.
+ @Input ui32Size Desired buffer size in bytes.
+ @Input ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Input pfProducerDB Optional callback, may be null.
+ @Input pvProducerData Optional user data for callback, may be null.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name
+ exceeded MAX_STREAM_NAME_SIZE
+ @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for stream
+ handle.
+ @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with
+ the same stream name string.
+ @Return eError Internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32StreamFlags,
+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+ void *pvOnReaderOpenUD,
+ TL_STREAM_SOURCECB pfProducerCB,
+ void *pvProducerUD);
+
+/*************************************************************************/ /*!
+ @Function TLStreamOpen
+ @Description Attach to existing stream that has already been created by a
+ TLStreamCreate call. A handle is returned to the stream.
+ @Output phStream Pointer to handle to store the stream.
+ @Input szStreamName Name of stream, should match an already
+ existing stream name
+ @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the
+ requested stream name.
+ @Return PVRSRV_ERROR_INVALID_PARAMS A non-NULL pointer to the stream
+ handle is required.
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName);
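To illustrate the create/open/close balancing rule described above, a hypothetical owner component and a second user of the same stream might cooperate as follows; the stream name, size and flag choice are made up and error handling is abbreviated.

/* Sketch only: one TLStreamCreate plus one TLStreamOpen, balanced by two closes. */
static void ExampleCreateOpenClose(void)
{
	IMG_CHAR acName[] = "example_stream"; /* hypothetical stream name */
	IMG_HANDLE hOwner = NULL, hUser = NULL;
	PVRSRV_ERROR eError;

	eError = TLStreamCreate(&hOwner, acName, 16 * 1024,
	                        TL_FLAG_RESERVE_DROP_NEWER,
	                        NULL, NULL,   /* no on-reader-open callback */
	                        NULL, NULL);  /* no producer callback */
	if (eError != PVRSRV_OK)
		return;

	/* Another component attaches to the same stream by name. */
	if (TLStreamOpen(&hUser, acName) == PVRSRV_OK)
		TLStreamClose(hUser);

	TLStreamClose(hOwner);
}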
+
+/*************************************************************************/ /*!
+ @Function TLStreamReconfigure
+ @Description Request the stream flags controlling buffer behaviour to
+ be updated.
+ In the case where TL_FLAG_RESERVE_BLOCK is to be used,
+ TLStreamCreate should be called without that flag and this
+ function used to change the stream mode once a consumer process
+ has been started. This avoids a deadlock scenario where a
+ TLStreamWrite/TLStreamReserve call would hold the Bridge Lock
+ while blocking if the TL buffer is full.
+ TL_FLAG_RESERVE_BLOCK blocking should also never drop the
+ Bridge Lock, as this leads to another deadlock scenario where
+ the caller of TLStreamWrite/TLStreamReserve has already acquired
+ another lock (e.g. gHandleLock) which is not dropped. That
+ thread would then be acquiring locks out of order.
+ @Input hStream Handle to stream to update.
+ @Input ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or inconsistent
+ stream flags.
+ @Return PVRSRV_ERROR_NOT_READY Stream is currently being written to;
+ try again later.
+ @Return eError Internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReconfigure(
+ IMG_HANDLE hStream,
+ IMG_UINT32 ui32StreamFlags);
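For instance, a producer might create its stream in drop mode and only switch it to blocking once a consumer is known to be attached, retrying while a write is in flight. This is only a sketch of the pattern described above; a real caller would typically back off between retries.

/* Hedged sketch: switch an existing stream to blocking reserves. */
static PVRSRV_ERROR ExampleEnableBlocking(IMG_HANDLE hStream)
{
	PVRSRV_ERROR eError;

	do
	{
		eError = TLStreamReconfigure(hStream, TL_FLAG_RESERVE_BLOCK);
		/* PVRSRV_ERROR_NOT_READY means the stream is being written to; retry. */
	} while (eError == PVRSRV_ERROR_NOT_READY);

	return eError;
}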
+
+/*************************************************************************/ /*!
+ @Function TLStreamClose
+ @Description Detach from the stream associated with the given handle. If
+ the current handle is the last one accessing the stream
+ (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
+ the number of TLStreamClose calls) then the stream is also
+ deleted.
+ On return the handle is no longer valid.
+ @Input hStream Handle to stream that will be closed.
+ @Return None.
+*/ /**************************************************************************/
+void
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamReserve
+ @Description Reserve space in stream buffer. When successful every
+ TLStreamReserve call must be followed by a matching
+ TLStreamCommit call. While a TLStreamCommit call is pending
+ for a stream, subsequent TLStreamReserve calls for this
+ stream will fail.
+ @Input hStream Stream handle.
+ @Output ppui8Data Pointer to a pointer to a location in the
+ buffer. The caller can then use this address
+ in writing data into the stream.
+ @Input ui32Size Number of bytes to reserve in buffer.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved
+ that are pending to be committed.
+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to
+ reserve more space than the
+ buffer size.
+ @Return PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG The reserve size requested
+ is larger than the free
+ space or maximum supported
+ packet size.
+ @Return PVRSRV_OK Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function TLStreamReserve2
+ @Description Reserve space in stream buffer. When successful every
+ TLStreamReserve call must be followed by a matching
+ TLStreamCommit call. While a TLStreamCommit call is pending
+ for a stream, subsequent TLStreamReserve calls for this
+ stream will fail.
+ @Input hStream Stream handle.
+ @Output ppui8Data Pointer to a pointer to a location in the
+ buffer. The caller can then use this address
+ in writing data into the stream.
+ @Input ui32Size Ideal number of bytes to reserve in buffer.
+ @Input ui32SizeMin Minimum number of bytes to reserve in buffer.
+ @Input pui32Available Optional, but when present and the
+ RESERVE_TOO_BIG error is returned, a size
+ suggestion is returned in this argument which
+ the caller can attempt to reserve again for a
+ successful allocation.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved
+ that are pending to be committed.
+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to
+ reserve more space than the
+ buffer size.
+ @Return PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG The reserve size requested
+ is larger than the free
+ space or maximum supported
+ packet size.
+ Check the pui32Available
+ value for the correct
+ reserve size to use.
+ @Return PVRSRV_OK Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available);
+
+/*************************************************************************/ /*!
+ @Function TLStreamCommit
+ @Description Notify TL that data have been written in the stream buffer.
+ Should always follow and match TLStreamReserve call.
+ @Input hStream Stream handle.
+ @Input ui32Size Number of bytes that have been added to the
+ stream.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_STREAM_MISUSE Commit results in more data
+ committed than the buffer size,
+ the stream is misused.
+ @Return eError Commit was successful but
+ internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream,
+ IMG_UINT32 ui32Size);
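Putting the two halves together, the explicit path mirrors what TLStreamWrite does internally: reserve, fill the returned pointer, then commit the same number of bytes (TLStreamReserve2 additionally lets the caller accept a smaller reservation and suggests a workable size via pui32Available on failure). The helper below is only a sketch; OSDeviceMemCopy is the copy helper the implementation itself uses.

/* Hedged sketch of the reserve/fill/commit path. */
static PVRSRV_ERROR ExampleReserveCommit(IMG_HANDLE hStream,
                                         IMG_UINT8 *pui8Payload,
                                         IMG_UINT32 ui32Size)
{
	IMG_UINT8 *pui8Dest = NULL;
	PVRSRV_ERROR eError;

	eError = TLStreamReserve(hStream, &pui8Dest, ui32Size);
	if (eError != PVRSRV_OK)
		return eError;
	if (pui8Dest == NULL)
		return PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG; /* buffer full, as in TLStreamWrite */

	OSDeviceMemCopy(pui8Dest, pui8Payload, ui32Size);

	/* Commit exactly what was reserved so the packet framing stays intact. */
	return TLStreamCommit(hStream, ui32Size);
}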
+
+/*************************************************************************/ /*!
+ @Function TLStreamWrite
+ @Description Combined Reserve/Commit call. This function Reserves space in
+ the specified stream buffer, copies ui32Size bytes of data
+ from the array pui8Src points to and Commits in an "atomic"
+ style operation.
+ @Input hStream Stream handle.
+ @Input pui8Src Source to read data from.
+ @Input ui32Size Number of bytes to copy and commit.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream,
+ IMG_UINT8 *pui8Src,
+ IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function TLStreamSync
+ @Description Signal the consumer to start acquiring data from the stream
+ buffer. Called by producers that use the TL_FLAG_NO_SIGNAL_ON_COMMIT
+ flag to manually control when consumers start reading the
+ stream. Used when multiple small writes need to be batched.
+ @Input hStream Stream handle.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
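As a sketch of the batching pattern: with a stream created (or reconfigured) to use TL_FLAG_NO_SIGNAL_ON_COMMIT, a producer can queue several small records and wake the consumer once at the end. The array parameters are hypothetical.

/* Hedged sketch: batch several writes, then signal the consumer once. */
static PVRSRV_ERROR ExampleBatchedWrites(IMG_HANDLE hStream,
                                         IMG_UINT8 **apui8Records,
                                         IMG_UINT32 *aui32Sizes,
                                         IMG_UINT32 ui32Count)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_UINT32 i;

	for (i = 0; i < ui32Count; i++)
	{
		eError = TLStreamWrite(hStream, apui8Records[i], aui32Sizes[i]);
		if (eError != PVRSRV_OK)
			break;
	}

	/* The commits above did not signal, so notify any waiting consumer now. */
	(void)TLStreamSync(hStream);

	return eError;
}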
+
+
+/*************************************************************************/ /*!
+ @Function TLStreamMarkEOS
+ @Description Insert an EOS marker packet into the given stream.
+ @Input hStream Stream handle.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamInfo
+ @Description Returns run-time information about the stream buffer's
+ elemental sizes by filling in psInfo. Users can use these values
+ to calculate the parameters they use in TLStreamCreate and
+ TLStreamReserve.
+ @Output psInfo pointer to stream info structure.
+ @Return None.
+*/ /**************************************************************************/
+void
+TLStreamInfo(PTL_STREAM_INFO psInfo);
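For example, a caller could use these values to size a stream buffer: budget one packet header per record and round the total up to the reported page size. The record parameters below are hypothetical.

/* Hedged sketch: derive a TLStreamCreate buffer size from TL_STREAM_INFO. */
static IMG_UINT32 ExampleBufferSize(IMG_UINT32 ui32Records, IMG_UINT32 ui32RecordBytes)
{
	TL_STREAM_INFO sInfo;
	IMG_UINT32 ui32Bytes;

	TLStreamInfo(&sInfo);

	/* Each record costs its payload plus one packet header. */
	ui32Bytes = ui32Records * (ui32RecordBytes + sInfo.headerSize);

	/* Round up to the page size reported by the implementation. */
	return ((ui32Bytes + sInfo.pageSize - 1) / sInfo.pageSize) * sInfo.pageSize;
}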
+
+
+#endif /* __TLSTREAM_H__ */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
+
--- /dev/null
+/*************************************************************************/ /*!
+@Title Linux trace event helper functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/sched.h>
+
+#include "img_types.h"
+#include "trace_events.h"
+#if !defined(SUPPORT_GPUTRACE_EVENTS)
+#define CREATE_TRACE_POINTS
+#endif
+#include "rogue_trace_events.h"
+
+static bool fence_update_event_enabled, fence_check_event_enabled;
+
+bool trace_rogue_are_fence_updates_traced(void)
+{
+ return fence_update_event_enabled;
+}
+
+bool trace_rogue_are_fence_checks_traced(void)
+{
+ return fence_check_event_enabled;
+}
+
+/*
+ * Callbacks referenced from rogue_trace_events.h. Note that these are not
+ * thread-safe; however, since running trace code when tracing is not enabled
+ * is simply a no-op, there is no harm in that.
+ */
+
+void trace_fence_update_enabled_callback(void)
+{
+ fence_update_event_enabled = true;
+}
+
+void trace_fence_update_disabled_callback(void)
+{
+ fence_update_event_enabled = false;
+}
+
+void trace_fence_check_enabled_callback(void)
+{
+ fence_check_event_enabled = true;
+}
+
+void trace_fence_check_disabled_callback(void)
+{
+ fence_check_event_enabled = false;
+}
+
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+ IMG_UINT i;
+ for (i = 0; i < uCount; i++)
+ {
+ trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ pauiAddresses[i].ui32Addr, paui32Values[i]);
+ }
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+ IMG_UINT i;
+ for (i = 0; i < uCount; i++)
+ {
+ trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ pauiAddresses[i].ui32Addr, paui32Values[i]);
+ }
+}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+ ui32JobId,
+ puData->sUpdate.ui32FWAddr,
+ puData->sUpdate.ui32OldValue,
+ puData->sUpdate.ui32NewValue);
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sUpdate));
+ }
+}
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ if (bPrEvent)
+ {
+ trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckSuccess.ui32FWAddr,
+ puData->sCheckSuccess.ui32Value);
+ }
+ else
+ {
+ trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckSuccess.ui32FWAddr,
+ puData->sCheckSuccess.ui32Value);
+ }
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sCheckSuccess));
+ }
+}
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ if (bPrEvent)
+ {
+ trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckFail.ui32FWAddr,
+ puData->sCheckFail.ui32Value,
+ puData->sCheckFail.ui32Required);
+ }
+ else
+ {
+ trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckFail.ui32FWAddr,
+ puData->sCheckFail.ui32Value,
+ puData->sCheckFail.ui32Required);
+ }
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sCheckFail));
+ }
+}
+
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Linux trace events and event helper functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rgx_fwif_km.h"
+#include "rgx_hwperf_km.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+bool trace_rogue_are_fence_checks_traced(void);
+
+bool trace_rogue_are_fence_updates_traced(void);
+
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values);
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+#else /* !CONFIG_EVENT_TRACING */
+static inline
+bool trace_rogue_are_fence_checks_traced(void)
+{
+ return false;
+}
+
+static inline
+bool trace_rogue_are_fence_updates_traced(void)
+{
+ return false;
+}
+
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif /* CONFIG_EVENT_TRACING */
+
+#endif /* TRACE_EVENTS_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Provides splay-trees.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implementation of splay-trees.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */
+#include "osfunc.h" /* for OSMemFree */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top down splay
+ *
+ * @param ui32Flags the flags that must be splayed to the root (if possible).
+ * @param psTree The tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_SPLAY_TREE sTmp1;
+ IMG_PSPLAY_TREE psLeft;
+ IMG_PSPLAY_TREE psRight;
+ IMG_PSPLAY_TREE psTmp2;
+
+ if (psTree == NULL)
+ {
+ return NULL;
+ }
+
+ sTmp1.psLeft = NULL;
+ sTmp1.psRight = NULL;
+
+ psLeft = &sTmp1;
+ psRight = &sTmp1;
+
+ for (;;)
+ {
+ if (ui32Flags < psTree->ui32Flags)
+ {
+ if (psTree->psLeft == NULL)
+ {
+ break;
+ }
+
+ if (ui32Flags < psTree->psLeft->ui32Flags)
+ {
+ /* if we get to this point, we need to rotate right the tree */
+ psTmp2 = psTree->psLeft;
+ psTree->psLeft = psTmp2->psRight;
+ psTmp2->psRight = psTree;
+ psTree = psTmp2;
+ if (psTree->psLeft == NULL)
+ {
+ break;
+ }
+ }
+
+ /* if we get to this point, we need to link right */
+ psRight->psLeft = psTree;
+ psRight = psTree;
+ psTree = psTree->psLeft;
+ }
+ else
+ {
+ if (ui32Flags > psTree->ui32Flags)
+ {
+ if (psTree->psRight == NULL)
+ {
+ break;
+ }
+
+ if (ui32Flags > psTree->psRight->ui32Flags)
+ {
+ /* if we get to this point, we need to rotate left the tree */
+ psTmp2 = psTree->psRight;
+ psTree->psRight = psTmp2->psLeft;
+ psTmp2->psLeft = psTree;
+ psTree = psTmp2;
+ if (psTree->psRight == NULL)
+ {
+ break;
+ }
+ }
+
+ /* if we get to this point, we need to link left */
+ psLeft->psRight = psTree;
+ psLeft = psTree;
+ psTree = psTree->psRight;
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+
+ /* at this point re-assemble the tree */
+ psLeft->psRight = psTree->psLeft;
+ psRight->psLeft = psTree->psRight;
+ psTree->psLeft = sTmp1.psRight;
+ psTree->psRight = sTmp1.psLeft;
+ return psTree;
+}
+
+
+/**
+ * This function inserts a node into the tree (unless it is already present,
+ * in which case it is equivalent to performing only a splay operation).
+ *
+ * @param ui32Flags the key of the new node
+ * @param psTree The tree into which one wants to add a new node
+ * @return The resulting tree with the node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_PSPLAY_TREE psNew;
+
+ if (psTree != NULL)
+ {
+ psTree = PVRSRVSplay(ui32Flags, psTree);
+ if (psTree->ui32Flags == ui32Flags)
+ {
+ return psTree;
+ }
+ }
+
+ psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+ if (psNew == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+ return NULL;
+ }
+
+ psNew->ui32Flags = ui32Flags;
+ OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(PVR_CTZLL)
+ psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+ if (psTree == NULL)
+ {
+ psNew->psLeft = NULL;
+ psNew->psRight = NULL;
+ return psNew;
+ }
+
+ if (ui32Flags < psTree->ui32Flags)
+ {
+ psNew->psLeft = psTree->psLeft;
+ psNew->psRight = psTree;
+ psTree->psLeft = NULL;
+ }
+ else
+ {
+ psNew->psRight = psTree->psRight;
+ psNew->psLeft = psTree;
+ psTree->psRight = NULL;
+ }
+
+ return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (unless it is not there, in which case it is
+ * equivalent to a splay operation)
+ *
+ * @param ui32Flags the value of the node to remove
+ * @param psTree the tree from which the node must be removed
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_PSPLAY_TREE psTmp;
+ if (psTree == NULL)
+ {
+ return NULL;
+ }
+
+ psTree = PVRSRVSplay(ui32Flags, psTree);
+ if (ui32Flags == psTree->ui32Flags)
+ {
+ /* The value was present in the tree */
+ if (psTree->psLeft == NULL)
+ {
+ psTmp = psTree->psRight;
+ }
+ else
+ {
+ psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft);
+ psTmp->psRight = psTree->psRight;
+ }
+ OSFreeMem(psTree);
+ return psTmp;
+ }
+
+ /* the value was not present in the tree, so just return it as is (after the
+ * splay) */
+ return psTree;
+}
+
+
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Splay trees interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides debug functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+#include "pvr_intrinsics.h"
+
+#if defined(PVR_CTZLL)
+ /* Map the per-bucket "is free" booleans onto the bits of an integer.
+ * This way the driver can find the first non-empty bucket without a loop.
+ */
+ typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+/* heads of the lists of free boundary tags, indexed by pvr_log2 of the
+ boundary tag size */
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree
+{
+ /* left child/subtree */
+ struct img_splay_tree * psLeft;
+
+ /* right child/subtree */
+ struct img_splay_tree * psRight;
+
+ /* Flags to match on this span, used as the key. */
+ IMG_UINT32 ui32Flags;
+#if defined(PVR_CTZLL)
+ /* each bit of this int is a boolean telling if the corresponding
+ bucket is empty or not */
+ IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+ struct _BT_ * buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
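A hedged sketch of how these three calls compose: each returns the new root, so the caller must always reassign its root pointer (the buckets that the allocator hangs off each node are ignored here).

/* Illustrative only; the keys are arbitrary flag values. */
static void ExampleSplayUsage(void)
{
	IMG_PSPLAY_TREE psRoot = NULL;

	psRoot = PVRSRVInsert(0x1, psRoot);  /* add key 0x1 (or just splay if present) */
	psRoot = PVRSRVInsert(0x4, psRoot);

	/* Splay a key towards the root; if it is present it becomes the root. */
	psRoot = PVRSRVSplay(0x4, psRoot);
	if (psRoot != NULL && psRoot->ui32Flags == 0x4)
	{
		/* psRoot->buckets[...] would be consulted by the real allocator here. */
	}

	psRoot = PVRSRVDelete(0x1, psRoot);  /* remove a key; returns the new root */
	psRoot = PVRSRVDelete(0x4, psRoot);
	(void)psRoot;
}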
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
--- /dev/null
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File services_kernel_client.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* rgx_fwif_shared.h */
+
+struct _RGXFWIF_DEV_VIRTADDR_ {
+ __u32 ui32Addr;
+};
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM {
+ volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+ __u32 ui32Flags;
+ struct pvrsrv_sync_prim *psSync;
+ __u32 ui32FenceValue;
+ __u32 ui32UpdateValue;
+};
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+
+struct PVRSRV_CLIENT_SYNC_PRIM;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct _PMR_;
+struct _PVRSRV_DEVICE_NODE_;
+struct dma_buf;
+struct SYNC_PRIM_CONTEXT;
+
+/* pvr_notifier.h */
+
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
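The command-complete notifier above can be used roughly as follows; the callback body, the helper name and the private-data handle are hypothetical.

/* Hedged sketch: register a command-complete notifier and release it later. */
static void example_cmd_complete(void *hCmdCompHandle)
{
	/* hCmdCompHandle is the hPrivData supplied at registration time. */
	(void)hCmdCompHandle;
}

static enum PVRSRV_ERROR example_register_notifier(void **phNotify, void *pvPriv)
{
	return PVRSRVRegisterCmdCompleteNotify(phNotify, example_cmd_complete, pvPriv);
}

/* ...and on teardown: PVRSRVUnregisterCmdCompleteNotify(hNotify); */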
+
+#define DEBUG_REQUEST_DC 0
+#define DEBUG_REQUEST_SERVERSYNC 1
+#define DEBUG_REQUEST_SYS 2
+#define DEBUG_REQUEST_ANDROIDSYNC 3
+#define DEBUG_REQUEST_LINUXFENCE 4
+#define DEBUG_REQUEST_SYNCCHECKPOINT 5
+#define DEBUG_REQUEST_HTB 6
+#define DEBUG_REQUEST_APPHINT 7
+
+#define DEBUG_REQUEST_VERBOSITY_LOW 0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH 2
+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH
+
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+ const char *fmt, ...) __printf(2, 3);
+
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+ __u32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify,
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ __u32 ui32RequesterID,
+ void *hDbgRequestHandle);
+enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify);
+
+/* physmem_dmabuf.h */
+
+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR);
+
+/* pvrsrv.h */
+
+enum PVRSRV_ERROR PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject);
+enum PVRSRV_ERROR PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR SyncPrimContextCreate(
+ struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+ struct SYNC_PRIM_CONTEXT **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext,
+ struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName);
+enum PVRSRV_ERROR SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+enum PVRSRV_ERROR SyncPrimGetFirmwareAddr(
+ struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ __u32 *sync_addr);
+enum PVRSRV_ERROR SyncPrimSet(struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ __u32 ui32Value);
+
+/* pdump_km.h */
+
+#ifdef PDUMP
+enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...);
+#else
+static inline enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...)
+{
+ return PVRSRV_OK;
+}
+#endif
+
+/* osfunc.h */
+
+void OSAcquireBridgeLock(void);
+void OSReleaseBridgeLock(void);
+enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM);
+
+/* srvkm.h */
+
+enum PVRSRV_ERROR PVRSRVDeviceCreate(void *pvOSDevice,
+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+enum PVRSRV_ERROR PVRSRVDeviceDestroy(
+ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+const char *PVRSRVGetErrorStringKM(enum PVRSRV_ERROR eError);
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */