#include <services/ffa_svc.h>
#include "tsp_private.h"
+/*******************************************************************************
+ * Wrapper function to send a direct request.
+ ******************************************************************************/
+smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7)
+{
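+ /*
+ * The FF-A ABI packs both endpoint IDs into w1 of the direct
+ * message: the sender ID in bits [31:16] and the receiver ID in
+ * bits [15:0].
+ */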
+ uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+ /* Send Direct Request. */
+ return smc_helper(FFA_MSG_SEND_DIRECT_REQ_SMC64, src_dst_ids,
+ 0, arg3, arg4, arg5, arg6, arg7);
+}
+
+/*******************************************************************************
+ * Wrapper function to send a direct response.
+ ******************************************************************************/
+smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7)
+{
+ uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+ return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC64, src_dst_ids,
+ 0, arg3, arg4, arg5, arg6, arg7);
+}
+
+/*******************************************************************************
+ * Memory Management Helpers.
+ ******************************************************************************/
+
/**
* Initialises the header of the given `ffa_mtd`, not including the
* composite memory region offset.
*
* Returns the size of the descriptor written.
*/
-uint32_t ffa_memory_retrieve_request_init(
+static uint32_t ffa_memory_retrieve_request_init(
struct ffa_mtd *memory_region, uint64_t handle,
ffa_endpoint_id16_t sender, ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
uint64_t tag, ffa_mtd_flag32_t flags,
0, 0, 0, 0);
}
+bool memory_retrieve(struct mailbox *mb,
+ struct ffa_mtd **retrieved,
+ uint64_t handle, ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+ ffa_mtd_flag32_t flags, uint32_t *frag_length,
+ uint32_t *total_length)
+{
+ smc_args_t ret;
+ uint32_t descriptor_size;
+ struct ffa_mtd *memory_region;
+
+ if (retrieved == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ /* Only dereference the mailbox once it has been validated. */
+ memory_region = (struct ffa_mtd *)mb->tx_buffer;
+
+ /* Clear TX buffer. */
+ memset(memory_region, 0, PAGE_SIZE);
+
+ /* Clear local buffer. */
+ memset(mem_region_buffer, 0, REGION_BUF_SIZE);
+
+ descriptor_size = ffa_memory_retrieve_request_init(
+ memory_region, handle, sender, receivers, receiver_count, 0, flags,
+ FFA_MEM_PERM_RW | FFA_MEM_PERM_NX,
+ FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
+ FFA_MEM_ATTR_INNER_SHAREABLE);
+
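+ /*
+ * The retrieve request fits in a single TX buffer, so the total
+ * and fragment lengths passed to FFA_MEM_RETRIEVE_REQ are both
+ * the full descriptor size.
+ */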
+ ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
+
+ if (ffa_func_id(ret) == FFA_ERROR) {
+ ERROR("Couldn't retrieve the memory page. Error: %x\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ /*
+ * The total_length and frag_length values returned below track the
+ * state of the transaction: once the sum of the fragment lengths of
+ * all received fragments equals total_length, the memory
+ * transaction is complete.
+ */
+ *total_length = ret._regs[1];
+ *frag_length = ret._regs[2];
+
+ /* Validate that frag_length does not exceed total_length or the mailbox size. */
+ if (*frag_length == 0U || *total_length == 0U ||
+ *frag_length > *total_length ||
+ *frag_length > (mb->rxtx_page_count * PAGE_SIZE)) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ /* Copy response to local buffer. */
+ memcpy(mem_region_buffer, mb->rx_buffer, *frag_length);
+
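+ /* The fragment has been copied out; hand the RX buffer back to the SPMC. */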
+ if (ffa_rx_release()) {
+ ERROR("Failed to release buffer!\n");
+ return false;
+ }
+
+ *retrieved = (struct ffa_mtd *) mem_region_buffer;
+
+ if ((*retrieved)->emad_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing supports max of %u receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+ /*
+ * We are sharing memory from the normal world therefore validate the NS
+ * bit was set by the SPMC.
+ */
+ if (((*retrieved)->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) == 0U) {
+ ERROR("SPMC has not set the NS bit! 0x%x\n",
+ (*retrieved)->memory_region_attributes);
+ return false;
+ }
+
+ VERBOSE("Memory Descriptor Retrieved!\n");
+
+ return true;
+}
+
/* Relinquish the memory region. */
bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
ffa_endpoint_id16_t id)
/*
- * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
+#include "ffa_helpers.h"
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <platform_tsp.h>
#include <services/ffa_svc.h>
#include <platform_def.h>
+static ffa_endpoint_id16_t tsp_id, spmc_id;
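+
+/* Scratch buffer used to reassemble fragmented memory transaction descriptors. */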
+uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);
+
+/* Partition Mailbox. */
+static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * Declare a global mailbox for use within the TSP.
+ * This will be initialized appropriately when the buffers
+ * are mapped with the SPMC.
+ */
+static struct mailbox mailbox;
+
+/*******************************************************************************
+ * This enum is used to handle test cases driven from the FF-A Test Driver.
+ ******************************************************************************/
+/* Keep in Sync with FF-A Test Driver. */
+enum message_t {
+ /* Partition Only Messages. */
+ FF_A_RELAY_MESSAGE = 0,
+
+ /* Basic Functionality. */
+ FF_A_ECHO_MESSAGE,
+ FF_A_RELAY_MESSAGE_EL3,
+
+ /* Memory Sharing. */
+ FF_A_MEMORY_SHARE,
+ FF_A_MEMORY_SHARE_FRAGMENTED,
+ FF_A_MEMORY_LEND,
+ FF_A_MEMORY_LEND_FRAGMENTED,
+
+ FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
+ FF_A_MEMORY_LEND_MULTI_ENDPOINT,
+
+ LAST,
+ FF_A_RUN_ALL = 255,
+ FF_A_OP_MAX = 256
+};
+
+#if SPMC_AT_EL3
extern void tsp_cpu_on_entry(void);
+#endif
-static ffa_endpoint_id16_t tsp_id, spmc_id;
+/*******************************************************************************
+ * Test Functions.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Enable the TSP to forward the received message to another partition and ask
+ * it to echo the value back in order to validate direct messages functionality.
+ ******************************************************************************/
+static int ffa_test_relay(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ smc_args_t ffa_forward_result;
+ ffa_endpoint_id16_t receiver = arg5;
+
+ /*
+ * The TSP must populate the sender field with its own endpoint ID,
+ * which is the destination of the incoming message.
+ */
+ ffa_forward_result = ffa_msg_send_direct_req(ffa_endpoint_destination(arg1),
+ receiver,
+ FF_A_ECHO_MESSAGE, arg4,
+ 0, 0, 0);
+ return ffa_forward_result._regs[3];
+}
+
+/*******************************************************************************
+ * This function handles memory management tests, currently share and lend.
+ * This test supports the use of FRAG_RX to use memory descriptors that do not
+ * fit in a single 4KB buffer.
+ ******************************************************************************/
+static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
+ ffa_mtd_flag32_t flags, bool multi_endpoint)
+{
+ struct ffa_mtd *m;
+ struct ffa_emad_v1_0 *receivers;
+ struct ffa_comp_mrd *composite;
+ int ret, status = 0;
+ unsigned int mem_attrs;
+ char *ptr;
+ ffa_endpoint_id16_t source = sender;
+ uint32_t total_length, recv_length = 0;
+
+ /*
+ * When testing multiple endpoints, choose a second partition ID
+ * that resides in the normal world so that the SPMC does not
+ * reject it as invalid.
+ * TODO: Should get endpoint receiver id and flag as input from NWd.
+ */
+ uint32_t receiver_count = multi_endpoint ? 2 : 1;
+ ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };
+
+ /* Ensure that the sender ID resides in the normal world. */
+ if (ffa_is_secure_world_id(sender)) {
+ ERROR("Invalid sender ID 0x%x.\n", sender);
+ return FFA_ERROR_DENIED;
+ }
+
+ if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
+ receiver_count, flags, &recv_length,
+ &total_length)) {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
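+ /*
+ * The endpoint memory access descriptors (EMADs) follow the MTD
+ * header at emad_offset.
+ */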
+ receivers = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) m + m->emad_offset);
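+
+ /*
+ * If the full descriptor did not fit in a single message, fetch
+ * the remaining fragments with FFA_MEM_FRAG_RX until total_length
+ * bytes have been received.
+ */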
+ while (total_length != recv_length) {
+ smc_args_t ffa_return;
+ uint32_t frag_length;
+
+ ffa_return = ffa_mem_frag_rx(handle, recv_length);
+
+ if (ffa_func_id(ffa_return) == FFA_ERROR) {
+ WARN("TSP: failed to resume mem with handle %lx\n",
+ handle);
+ return ffa_error_code(ffa_return);
+ }
+ frag_length = ffa_return._regs[3];
+
+ /* Validate that frag_length does not exceed total_length or the mailbox size. */
+ if (frag_length > total_length ||
+ frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
+ ERROR("Invalid parameters!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Validate that the fragment fits in the remaining space of mem_region_buffer. */
+ if (frag_length + recv_length >= REGION_BUF_SIZE) {
+ ERROR("Out of memory!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
+ frag_length);
+
+ if (ffa_rx_release()) {
+ ERROR("Failed to release buffer!\n");
+ return FFA_ERROR_DENIED;
+ }
+
+ recv_length += frag_length;
+
+ assert(recv_length <= total_length);
+ }
+
+ composite = ffa_memory_region_get_composite(m, 0);
+ if (composite == NULL) {
+ WARN("Failed to get composite descriptor!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ VERBOSE("Address: %p; page_count: %x %lx\n",
+ (void *)composite->address_range_array[0].address,
+ composite->address_range_array[0].page_count, PAGE_SIZE);
+
+ /* This test is only concerned with RW permissions. */
+ if (ffa_get_data_access_attr(
+ receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
+ ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
+ ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
+ FFA_MEM_PERM_RW);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
+
+ /* Only expecting to be sent memory from NWd so map accordingly. */
+ mem_attrs |= MT_NS;
+
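+ /*
+ * Map each constituent range described by the composite descriptor,
+ * update its contents to validate access, then unmap it again below.
+ */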
+ for (uint32_t i = 0U; i < composite->address_range_count; i++) {
+ size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;
+
+ ptr = (char *) composite->address_range_array[i].address;
+ ret = mmap_add_dynamic_region(
+ (uint64_t)ptr,
+ (uint64_t)ptr,
+ size, mem_attrs);
+
+ if (ret != 0) {
+ ERROR("Failed [%u] mmap_add_dynamic_region %d (%lx) (%lx) (%x)!\n",
+ i, ret,
+ (uint64_t)composite->address_range_array[i].address,
+ size, mem_attrs);
+
+ /*
+ * Remove the mappings created so far in this transaction. `i`
+ * is unsigned, so a condition of `i >= 0U` would never become
+ * false; count down with `i-- > 0U` instead.
+ */
+ while (i-- > 0U) {
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->address_range_array[i].address,
+ composite->address_range_array[i].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed [%u] mmap_remove_dynamic_region!\n", i);
+ panic();
+ }
+ }
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ /* Increment memory region for validation purposes. */
+ ++(*ptr);
+
+ /*
+ * Read initial magic number from memory region for
+ * validation purposes.
+ */
+ if (i == 0U) {
+ status = *ptr;
+ }
+ }
+
+ for (uint32_t i = 0U; i < composite->address_range_count; i++) {
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->address_range_array[i].address,
+ composite->address_range_array[i].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed [%u] mmap_remove_dynamic_region!\n", i);
+ return FFA_ERROR_NO_MEMORY;
+ }
+ }
+
+ if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
+ m->handle, tsp_id)) {
+ ERROR("Failed to relinquish memory region!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ return status;
+}
static smc_args_t *send_ffa_pm_success(void)
{
return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
- tsp_id << FFA_DIRECT_MSG_SOURCE_SHIFT |
- spmc_id,
+ ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
+ << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
FFA_FWK_MSG_BIT |
(FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
0, 0, 0, 0, 0);
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_off_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_off_count);
- spin_unlock(&console_lock);
-#endif
return send_ffa_pm_success();
}
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_suspend_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_suspend_count);
- spin_unlock(&console_lock);
-#endif
return send_ffa_pm_success();
}
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_resume_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
read_mpidr(), max_off_pwrlvl);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_resume_count);
- spin_unlock(&console_lock);
-#endif
return send_ffa_pm_success();
}
panic();
}
+/*******************************************************************************
+ * Handles partition messages. Exercised from the FF-A Test Driver.
+ ******************************************************************************/
+static smc_args_t *handle_partition_message(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint16_t sender = ffa_endpoint_source(arg1);
+ uint16_t receiver = ffa_endpoint_destination(arg1);
+ int status = -1;
+ const bool multi_endpoint = true;
+
+ switch (arg3) {
+ case FF_A_MEMORY_SHARE:
+ INFO("TSP Tests: Memory Share Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_LEND:
+ INFO("TSP Tests: Memory Lend Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
+ INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
+ INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
+ break;
+ case FF_A_RELAY_MESSAGE:
+ INFO("TSP Tests: Relaying message--\n");
+ status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7);
+ break;
+
+ case FF_A_ECHO_MESSAGE:
+ INFO("TSP Tests: echo message--\n");
+ status = arg4;
+ break;
+
+ default:
+ INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
+ }
+
+ /* Swap the sender and receiver in the response. */
+ return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
+}
+
/*******************************************************************************
* This function implements the event loop for handling FF-A ABI invocations.
******************************************************************************/
*/
return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
case FFA_MSG_SEND_DIRECT_REQ_SMC32:
/* Check if a framework message, handle accordingly. */
if ((arg2 & FFA_FWK_MSG_BIT)) {
return handle_framework_message(smc_fid, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
- default:
- break;
+ return handle_partition_message(smc_fid, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
}
ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
* Mask FIQ interrupts to avoid preemption
* in case EL3 SPMC delegates an IRQ next or a
* managed exit. Lastly, unmask IRQs so that
- * they can be handled immediately upon re-entry
+ * they can be handled immediately upon re-entry.
* ---------------------------------------------
*/
write_daifset(DAIF_FIQ_BIT);
args->_regs[3], args->_regs[4], args->_regs[5],
args->_regs[6], args->_regs[7]);
args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
- ret._regs[3], ret._regs[4], ret._regs[5],
- ret._regs[6], ret._regs[7]);
+ ret._regs[3], ret._regs[4], ret._regs[5],
+ ret._regs[6], ret._regs[7]);
} while (1);
/* Not Reached. */
smc_args._regs[2]);
panic();
}
- /* Get TSP's endpoint id */
+ /* Get TSP's endpoint id. */
smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
ERROR("TSP could not get own ID (0x%lx) on core%d\n",
tsp_id = smc_args._regs[2];
INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);
- /* Get the SPMC ID */
+
+ /* Get the SPMC ID. */
smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
spmc_id = smc_args._regs[2];
- /* Update this cpu's statistics */
+ /* Call RXTX_MAP to map a 4k RX and TX buffer. */
+ if (ffa_rxtx_map((uintptr_t) send_page,
+ (uintptr_t) recv_page, 1)) {
+ ERROR("TSP could not map its RX/TX buffers\n");
+ panic();
+ }
+
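+ /* Record the mapped buffers in the TSP's mailbox for use by the FF-A helpers. */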
+ mailbox.tx_buffer = send_page;
+ mailbox.rx_buffer = recv_page;
+ mailbox.rxtx_page_count = 1;
+
+ /* Update this cpu's statistics. */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_on_count++;
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_on_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_on_count);
- spin_unlock(&console_lock);
-#endif
/* ---------------------------------------------
* Jump to the main event loop to return to EL3
* and be ready for the next request on this cpu.
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_on_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_on_count);
- spin_unlock(&console_lock);
-#endif
return (uint64_t) &tsp_vector_table;
}
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_on_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_on_count);
- spin_unlock(&console_lock);
-#endif
- /* Indicate to the SPD that we have completed turned ourselves on */
+ /* Indicate to the SPD that we have completed turning ourselves on. */
return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
}
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_off_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_off_count);
- spin_unlock(&console_lock);
-#endif
/* Indicate to the SPD that we have completed this request */
return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_suspend_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_suspend_count);
- spin_unlock(&console_lock);
-#endif
/* Indicate to the SPD that we have completed this request */
return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
tsp_stats[linear_id].eret_count++;
tsp_stats[linear_id].cpu_resume_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
read_mpidr(), max_off_pwrlvl);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_resume_count);
- spin_unlock(&console_lock);
-#endif
/* Indicate to the SPD that we have completed this request */
return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
-#if LOG_LEVEL >= LOG_LEVEL_INFO
- spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx received %s smc 0x%" PRIx64 "\n", read_mpidr(),
((func >> 31) & 1) == 1 ? "fast" : "yielding",
func);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);
- spin_unlock(&console_lock);
-#endif
/* Render secure services and obtain results here */
results[0] = arg1;
break;
case TSP_CHECK_DIT:
if (!is_armv8_4_dit_present()) {
-#if LOG_LEVEL >= LOG_LEVEL_ERROR
- spin_lock(&console_lock);
ERROR("DIT not supported\n");
- spin_unlock(&console_lock);
-#endif
results[0] = 0;
results[1] = 0xffff;
break;