source security/apparmor/Kconfig
source security/yama/Kconfig
+source security/optee_linuxdriver/Kconfig
+
source security/integrity/Kconfig
choice
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_TEE_SUPPORT) += optee_linuxdriver/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
# Trusted Execution Environment Configuration
config TEE_SUPPORT
bool "Trusted Execution Environment Support"
- default y
+ default n
---help---
This implements the Trusted Execution Environment (TEE) Client
API Specification from GlobalPlatform Device Technology.
#include <arm_common/teesmc.h>
#include <arm_common/teesmc_st.h>
+#include <linux/cpumask.h>
+
#include "tee_mem.h"
#include "tee_tz_op.h"
#include "tee_tz_priv.h"
#include "handle.h"
+#ifdef CONFIG_OUTER_CACHE
+#undef CONFIG_OUTER_CACHE
+#endif
+
#define SWITCH_CPU0_DEBUG
#define _TEE_TZ_NAME "armtz"
static void switch_cpumask_to_cpu0(cpumask_t *saved_cpu_mask)
{
long ret;
+
cpumask_t local_cpu_mask = CPU_MASK_NONE;
- pr_info("switch_cpumask_to_cpu cpu0\n");
- cpu_set(0, local_cpu_mask);
+ cpumask_set_cpu(0, &local_cpu_mask);
cpumask_copy(saved_cpu_mask, tsk_cpus_allowed(current));
ret = sched_setaffinity(0, &local_cpu_mask);
if (ret)
static void restore_cpumask(cpumask_t *saved_cpu_mask)
{
long ret;
- pr_info("restore_cpumask cpu0\n");
+
ret = sched_setaffinity(0, saved_cpu_mask);
if (ret)
pr_err("sched_setaffinity #2 -> 0x%lX", ret);
#endif
ret = param.a0;
- if (ret == TEESMC_RETURN_EBUSY) {
+ if (ret == TEESMC_RETURN_ETHREAD_LIMIT) {
/*
- * Since secure world returned busy, release the
+ * Since secure world is out of threads, release the
* lock we had when entering this function and wait
* for "something to happen" (something else to
* exit from secure world and needed resources may
dev_dbg(_DEV(tee), "%s: > ctx=%p\n", __func__, ctx);
- mutex_lock(&tee->lock);
tee_dec_stats(&tee->stats[TEE_STATS_CONTEXT_IDX]);
list_del(&ctx->entry);
- mutex_unlock(&tee->lock);
devm_kfree(_DEV(tee), ctx);
tee_put(tee);
{
int present = 1;
- mutex_lock(&tee->lock);
if ((entry->next == LIST_POISON1) && (entry->prev == LIST_POISON2))
present = 0;
- mutex_unlock(&tee->lock);
return present;
}
dev_dbg(_DEV(tee), "%s: ctx=%p\n", __func__, ctx);
+ mutex_lock(&tee->lock);
tee_context_put(ctx);
+ mutex_unlock(&tee->lock);
}
int tee_context_copy_from_client(const struct tee_context *ctx,
#define _TEE_CORE_FW_VER "1:0.1"
-static char *_tee_supp_app_name = "tee_supplicant";
+static char *_tee_supp_app_name = "tee-supplicant";
/* Store the class misc reference */
static struct class *misc_class;
{
int ret = -EINVAL;
struct tee_context *ctx = filp->private_data;
+ void __user *u_arg;
BUG_ON(!ctx);
BUG_ON(!ctx->tee);
dev_dbg(_DEV(ctx->tee), "%s: > cmd nr=%d\n", __func__, _IOC_NR(cmd));
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ u_arg = compat_ptr(arg);
+ else
+ u_arg = (void __user *)arg;
+#else
+ u_arg = (void __user *)arg;
+#endif
+
switch (cmd) {
case TEE_OPEN_SESSION_IOC:
- ret =
- tee_do_create_session(ctx, (struct tee_cmd_io __user *)arg);
+ ret = tee_do_create_session(ctx,
+ (struct tee_cmd_io __user *)u_arg);
break;
case TEE_ALLOC_SHM_IOC:
- ret = tee_do_shm_alloc(ctx, (struct tee_shm_io __user *)arg);
+ ret = tee_do_shm_alloc(ctx, (struct tee_shm_io __user *)u_arg);
break;
case TEE_GET_FD_FOR_RPC_SHM_IOC:
- ret =
- tee_do_get_fd_for_rpc_shm(ctx,
- (struct tee_shm_io __user *)arg);
+ ret = tee_do_get_fd_for_rpc_shm(ctx,
+ (struct tee_shm_io __user *)u_arg);
break;
default:
ret = -ENOSYS;
.write = tee_supp_write,
.open = tee_ctx_open,
.release = tee_ctx_release,
- .unlocked_ioctl = tee_ioctl,
- .compat_ioctl = tee_ioctl
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tee_ioctl,
+#endif
+ .unlocked_ioctl = tee_ioctl
};
static void tee_plt_device_release(struct device *dev)
static void reset_tee_cmd(struct tee_cmd_io *cmd)
{
+ memset(cmd, 0, sizeof(struct tee_cmd_io));
cmd->fd_sess = -1;
cmd->cmd = 0;
cmd->uuid = NULL;
shm = (struct tee_shm *)(long)shm_io.fd_shm;
shared_memory->buffer = shm->kaddr;
- pr_debug("%s(%zd) => fd=%d, kaddr=%p\n", __func__,
+ pr_debug("%s(%d) => fd=%d, kaddr=%p\n", __func__,
shm_io.size, shm_io.fd_shm, (void *)shared_memory->buffer);
return TEEC_SUCCESS;
const struct file_operations tee_session_fops = {
	.owner = THIS_MODULE,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tee_session_ioctl,
+#endif
	.unlocked_ioctl = tee_session_ioctl,
-	.compat_ioctl = tee_session_ioctl,
	.release = tee_session_release,
ret = tee_session_close_be(sess);
- mutex_lock(&sess->ctx->tee->lock);
+ mutex_lock(&tee->lock);
tee_dec_stats(&tee->stats[TEE_STATS_SESSION_IDX]);
list_del(&sess->entry);
- mutex_unlock(&sess->ctx->tee->lock);
devm_kfree(_DEV(tee), sess);
tee_context_put(ctx);
tee_put(tee);
+ mutex_unlock(&tee->lock);
dev_dbg(_DEV(tee), "%s: <\n", __func__);
return ret;
sess->ctx = ctx;
ret = tee_session_open_be(sess, cmd_io);
+ mutex_lock(&tee->lock);
if (ret || !sess->sessid || cmd_io->err) {
dev_err(_DEV(tee), "%s: ERROR ret=%d (err=0x%08x, org=%d, sessid=0x%08x)\n",
__func__, ret, cmd_io->err,
tee_put(tee);
tee_context_put(ctx);
devm_kfree(_DEV(tee), sess);
+ mutex_unlock(&tee->lock);
if (ret)
return ERR_PTR(ret);
else
return NULL;
}
- mutex_lock(&tee->lock);
tee_inc_stats(&tee->stats[TEE_STATS_SESSION_IDX]);
list_add_tail(&sess->entry, &ctx->list_sess);
mutex_unlock(&tee->lock);
dev_dbg(_DEV_TEE,
"Size has been updated by the TA %zd != %zd\n",
size_new,
- cmd_io->op->params[idx].tmpref.
- size);
+ cmd_io->op->params[idx].tmpref.size);
tee_put_user(ctx, size_new,
&cmd_io->op->params[idx].tmpref.size);
}
dev_err(_DEV_TEE,
" *** Wrong returned size from %d:%zd > %zd\n",
idx, size_new,
- cmd_io->op->params[idx].tmpref.
- size);
+ cmd_io->op->params[idx].tmpref.size);
else if (tee_copy_to_user
(ctx,
case TEEC_MEMREF_PARTIAL_OUTPUT:
case TEEC_MEMREF_PARTIAL_INOUT:
case TEEC_MEMREF_WHOLE:
+ parent = &cmd->param.c_shm[idx];
if (type == TEEC_MEMREF_WHOLE) {
offset = 0;
size = parent->size;
offset = cmd_io->op->params[idx].memref.offset;
size = cmd_io->op->params[idx].memref.size;
}
- parent = &cmd->param.c_shm[idx];
/* Returned updated size */
size_new = cmd->param.params[idx].shm->size_req;
INMSG();
+ mutex_lock(&tee->lock);
shm = tee_shm_alloc(tee, size, TEE_SHM_TEMP | TEE_SHM_FROM_RPC);
if (IS_ERR_OR_NULL(shm)) {
dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
goto out;
}
- mutex_lock(&tee->lock);
tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
list_add_tail(&shm->entry, &tee->list_rpc_shm);
- mutex_unlock(&tee->lock);
+
shm->ctx = NULL;
out:
+ mutex_unlock(&tee->lock);
OUTMSGX(shm);
return shm;
}
void tee_shm_free_from_rpc(struct tee_shm *shm)
{
+ struct tee *tee;
+
if (shm == NULL)
return;
-
+ tee = shm->tee;
+ mutex_lock(&tee->lock);
if (shm->ctx == NULL) {
- mutex_lock(&shm->tee->lock);
tee_dec_stats(&shm->tee->stats[TEE_STATS_SHM_IDX]);
list_del(&shm->entry);
- mutex_unlock(&shm->tee->lock);
}
tee_shm_free(shm);
+ mutex_unlock(&tee->lock);
}
struct tee_shm *tee_shm_alloc(struct tee *tee, size_t size, uint32_t flags)
if (ctx->usr_client)
shm_io->fd_shm = 0;
- else
- shm_io->ptr = NULL;
+ mutex_lock(&tee->lock);
shm = tee_shm_alloc(tee, shm_io->size, shm_io->flags);
if (IS_ERR_OR_NULL(shm)) {
dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
__func__, PTR_ERR(shm));
- return PTR_ERR(shm);
+ ret = PTR_ERR(shm);
+ goto out;
}
if (ctx->usr_client) {
}
shm->flags |= TEEC_MEM_DMABUF;
- } else
- shm_io->ptr = shm;
+ }
shm->ctx = ctx;
shm->dev = get_device(_DEV(tee));
BUG_ON(ret); /* tee_core_get must not issue */
tee_context_get(ctx);
- mutex_lock(&tee->lock);
tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
list_add_tail(&shm->entry, &ctx->list_shm);
- mutex_unlock(&tee->lock);
out:
+ mutex_unlock(&tee->lock);
OUTMSG(ret);
return ret;
}
mutex_lock(&ctx->tee->lock);
tee_dec_stats(&tee->stats[TEE_STATS_SHM_IDX]);
list_del(&shm->entry);
- mutex_unlock(&ctx->tee->lock);
tee_shm_free(shm);
tee_put(ctx->tee);
tee_context_put(ctx);
if (dev)
put_device(dev);
+ mutex_unlock(&ctx->tee->lock);
}
/* Buffer allocated by rpc from fw and to be accessed by the user
shm_io->fd_shm = 0;
+ mutex_lock(&tee->lock);
if (!list_empty(&tee->list_rpc_shm)) {
list_for_each(pshm, &tee->list_rpc_shm) {
shm = list_entry(pshm, struct tee_shm, entry);
}
shm->ctx = ctx;
- mutex_lock(&tee->lock);
list_move(&shm->entry, &ctx->list_shm);
- mutex_unlock(&tee->lock);
shm->dev = get_device(_DEV(tee));
ret = tee_get(tee);
BUG_ON(!tee->ops->shm_inc_ref(shm));
out:
+ mutex_unlock(&tee->lock);
OUTMSG(ret);
return ret;
}
dev_dbg(_DEV(tee), "%s: > fd=%d flags=%08x\n",
__func__, c_shm->d.fd, c_shm->flags);
+ mutex_lock(&tee->lock);
shm = kzalloc(sizeof(*shm), GFP_KERNEL);
if (IS_ERR_OR_NULL(shm)) {
dev_err(_DEV(tee), "can't alloc tee_shm\n");
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err;
}
shm->ctx = ctx;
#endif
}
+ mutex_unlock(&tee->lock);
OUTMSGX(shm);
return shm;
err:
kfree(shm);
+ mutex_unlock(&tee->lock);
OUTMSGX(ERR_PTR(ret));
return ERR_PTR(ret);
}
BUG_ON(!shm);
BUG_ON(!(shm->flags & TEE_SHM_MEMREF));
+ mutex_lock(&tee->lock);
if (shm->flags & TEEC_MEM_DMABUF) {
struct tee_shm_dma_buf *sdb;
struct dma_buf *dma_buf;
}
kfree(shm);
+ mutex_unlock(&tee->lock);
OUTMSG(0);
}
if (sizeof(rpc->commToUser) < datalen)
break;
- mutex_lock(&rpc->outsync);
+ /*
+	 * Other threads block here until we've copied our
+ * answer from the supplicant
+ */
+ mutex_lock(&rpc->thrd_mutex);
+ mutex_lock(&rpc->outsync);
memcpy(&rpc->commToUser, data, datalen);
-
mutex_unlock(&rpc->outsync);
dev_dbg(tee->dev,
rpc->commToUser.cmd);
mutex_lock(&rpc->insync);
-
memcpy(data, &rpc->commFromUser, datalen);
-
mutex_unlock(&rpc->insync);
+ mutex_unlock(&rpc->thrd_mutex);
+
res = TEEC_RPC_OK;
break;
__SEMAPHORE_INITIALIZER(rpc->datafromuser, 0);
rpc->datatouser = (struct semaphore)
__SEMAPHORE_INITIALIZER(rpc->datatouser, 0);
+ mutex_init(&rpc->thrd_mutex);
mutex_init(&rpc->outsync);
mutex_init(&rpc->insync);
atomic_set(&rpc->used, 0);
};
struct tee_rpc_cmd {
- void *buffer;
+ union {
+ void *buffer;
+ uint64_t padding_buf;
+ };
uint32_t size;
uint32_t type;
int fd;
+ int reserved;
};
struct tee_rpc_invoke {
uint32_t cmd;
uint32_t res;
uint32_t nbr_bf;
+ uint32_t reserved;
struct tee_rpc_cmd cmds[TEE_RPC_BUFFER_NUMBER];
};
struct tee_rpc_invoke commFromUser;
struct semaphore datatouser;
struct semaphore datafromuser;
+ struct mutex thrd_mutex; /* Block the thread to wait for supp answer */
struct mutex outsync; /* Out sync mutex */
struct mutex insync; /* In sync mutex */
struct mutex reqsync; /* Request sync mutex */
/dts-v1/;
-/memreserve/ 0x81000000 0x00100000;
/memreserve/ 0x80000000 0x00010000;
/ {
<0x00000008 0x80000000 0 0x80000000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+		optee@83000000 {
+ reg = <0x00000000 0x83000000 0 0x01000000>;
+ };
+ };
+
gic: interrupt-controller@2f000000 {
compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
#interrupt-cells = <3>;
* r4-7/x4-7 Preserved
*
* Ebusy return register usage:
- * r0/x0 Return value, TEESMC_RETURN_EBUSY
+ * r0/x0 Return value, TEESMC_RETURN_ETHREAD_LIMIT
* r1-3/x1-3 Preserved
* r4-7/x4-7 Preserved
*
* TEESMC_RETURN_OK Call completed, result updated in
* the previously supplied struct
* teesmc32_arg.
- * TEESMC_RETURN_EBUSY Trusted OS busy, try again later.
+ * TEESMC_RETURN_ETHREAD_LIMIT Trusted OS out of threads,
+ * try again later.
 * TEESMC_RETURN_EBADADDR	Bad physical pointer to struct
* teesmc32_arg.
* TEESMC_RETURN_EBADCMD Bad/unknown cmd in struct teesmc32_arg
* struct teesmc32_arg
* TEESMC_RETURN_RPC Call suspended by RPC call to normal
* world.
- * TEESMC_RETURN_EBUSY Trusted OS busy, try again later.
+ * TEESMC_RETURN_ETHREAD_LIMIT Trusted OS out of threads,
+ * try again later.
* TEESMC_RETURN_ERESUME Resume failed, the opaque resume
* information was corrupt.
*/
/* Returned in r0 only from Trusted OS functions */
#define TEESMC_RETURN_OK 0x0
-#define TEESMC_RETURN_EBUSY 0x1
+#define TEESMC_RETURN_ETHREAD_LIMIT 0x1
#define TEESMC_RETURN_ERESUME 0x2
#define TEESMC_RETURN_EBADADDR 0x3
#define TEESMC_RETURN_EBADCMD 0x4
uint8_t clockSeqAndNode[8];
} TEEC_UUID;
+/**
+ * To support 32-bit user space on a 64-bit ("compat") kernel, the data
+ * structures shared by client applications and the TEE driver must use a
+ * layout that is identical for both ABIs. To keep the GlobalPlatform
+ * standard API in compatibility mode, anonymous padding members are added
+ * in the struct definitions below.
+ */
+
+
/**
* struct TEEC_SharedMemory - Memory to transfer data between a client
* application and trusted code.
* is responsible to populate the buffer pointer.
*/
typedef struct {
- void *buffer;
- size_t size;
+ union {
+ void *buffer;
+ uint64_t padding_ptr;
+ };
+ union {
+ size_t size;
+ uint64_t padding_sz;
+ };
uint32_t flags;
/*
* identifier can store a handle (int) or a structure pointer (void *).
* define this union to match case where sizeof(int)!=sizeof(void *).
*/
+ uint32_t reserved;
union {
int fd;
void *ptr;
+ uint64_t padding_d;
} d;
- uint8_t registered;
+ uint64_t registered;
} TEEC_SharedMemory;
/**
* operation to be called.
*/
typedef struct {
- void *buffer;
- size_t size;
+ union {
+ void *buffer;
+ uint64_t padding_ptr;
+ };
+ union {
+ size_t size;
+ uint64_t padding_sz;
+ };
} TEEC_TempMemoryReference;
/**
*
*/
typedef struct {
- TEEC_SharedMemory *parent;
- size_t size;
- size_t offset;
+ union {
+ TEEC_SharedMemory *parent;
+ uint64_t padding_ptr;
+ };
+ union {
+ size_t size;
+ uint64_t padding_sz;
+ };
+ union {
+ size_t offset;
+ uint64_t padding_off;
+ };
} TEEC_RegisteredMemoryReference;
/**
uint32_t paramTypes;
TEEC_Parameter params[TEEC_CONFIG_PAYLOAD_REF_COUNT];
/* Implementation-Defined */
- TEEC_Session *session;
+ union {
+ TEEC_Session *session;
+ uint64_t padding_ptr;
+ };
TEEC_SharedMemory memRefs[TEEC_CONFIG_PAYLOAD_REF_COUNT];
- uint32_t flags;
+ uint64_t flags;
} TEEC_Operation;
/**
TEEC_Result err;
uint32_t origin;
uint32_t cmd;
- TEEC_UUID __user *uuid;
- void __user *data;
- uint32_t data_size;
- TEEC_Operation __user *op;
int fd_sess;
+ /*
+ * Here fd_sess is 32-bit variable. Since TEEC_Result also is defined as
+ * "uint32_t", this structure is aligned.
+ */
+ union {
+ TEEC_UUID __user *uuid;
+ uint64_t padding_uuid;
+ };
+ union {
+ void __user *data;
+ uint64_t padding_data;
+ };
+ union {
+ TEEC_Operation __user *op;
+ uint64_t padding_op;
+ };
+ uint32_t data_size;
+ int32_t reserved;
};
struct tee_shm_io {
- void __user *buffer;
- size_t size;
- uint32_t flags;
union {
- int fd_shm;
- void *ptr;
+ void __user *buffer;
+ uint64_t padding_buf;
};
- uint8_t registered;
+ uint32_t size;
+ uint32_t flags;
+ /*
+ * Here fd_shm is 32-bit. To be compliant with the convention of file
+ * descriptor definition, fd_shm is defined as "int" type rather
+ * than "int32_t". Even though using "int32_t" is more obvious to
+ * indicate that we intend to keep this structure aligned.
+ */
+ int fd_shm;
+ uint32_t registered;
};
#define TEE_OPEN_SESSION_IOC _IOWR('t', 161, struct tee_cmd_io)