};
MODULE_DEVICE_TABLE(of, host1x_of_match);
-static void host1x_setup_sid_table(struct host1x *host)
+static void host1x_setup_virtualization_tables(struct host1x *host)
{
const struct host1x_info *info = host->info;
unsigned int i;

for (i = 0; i < info->num_sid_entries; i++) {
const struct host1x_sid_entry *entry = &info->sid_table[i];

host1x_hypervisor_writel(host, entry->offset, entry->base);
host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
}
+
+ for (i = 0; i < info->streamid_vm_table.count; i++) {
+ /* Allow all VMs access to all stream IDs. */
+ host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
+ }
+
+ for (i = 0; i < info->classid_vm_table.count; i++) {
+ /* Allow all VMs access to all classes. */
+ host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
+ }
+
+ for (i = 0; i < info->mmio_vm_table.count; i++) {
+ /* Use VM1 (that's us) as the originator VMID for engine MMIO accesses. */
+ host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
+ }
}
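
The three loops added above all program the same kind of structure: a flat table of 32-bit registers starting at .base, one entry every 4 bytes, each written with either a VM access mask (0xff) or an originator VMID (0x1). As a sketch only (this helper is not part of the patch, and the exact semantics of the written value are inferred from the comments above), the pattern could be factored into a small helper:

/*
 * Hypothetical helper, not part of the patch: program one virtualization
 * table, assuming a flat array of 32-bit registers with a 4-byte stride,
 * as the loops in host1x_setup_virtualization_tables() suggest.
 */
static void host1x_fill_vm_table(struct host1x *host,
				 const struct host1x_table_desc *desc,
				 u32 value)
{
	unsigned int i;

	for (i = 0; i < desc->count; i++)
		host1x_hypervisor_writel(host, value, desc->base + 4 * i);
}

With such a helper, the new body would reduce to three calls, e.g. host1x_fill_vm_table(host, &info->streamid_vm_table, 0xff);
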
static bool host1x_wants_iommu(struct host1x *host1x)
return 0;
resume_host1x:
- host1x_setup_sid_table(host);
+ host1x_setup_virtualization_tables(host);
host1x_syncpt_restore(host);
host1x_intr_start(host);
goto disable_clk;
}
- host1x_setup_sid_table(host);
+ host1x_setup_virtualization_tables(host);
host1x_syncpt_restore(host);
host1x_intr_start(host);
unsigned int limit;
};
+struct host1x_table_desc {
+ unsigned int base; /* offset of the first 32-bit entry in the table */
+ unsigned int count; /* number of entries in the table */
+};
+
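
Given the new descriptors added to struct host1x_info below, a chip-specific info structure could describe its tables roughly as follows. This is a minimal sketch with placeholder offsets and entry counts (not taken from any real SoC's register manual); chips that have no such tables simply leave the descriptors zeroed, so the programming loops above become no-ops for them.

/* Illustrative only: base offsets and entry counts are placeholders. */
static const struct host1x_info host1x_example_info = {
	.streamid_vm_table = { .base = 0x1000, .count = 64 },
	.classid_vm_table  = { .base = 0x1400, .count = 32 },
	.mmio_vm_table     = { .base = 0x1500, .count = 32 },
	/* remaining members omitted; they default to zero here */
};
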
struct host1x_info {
unsigned int nb_channels; /* host1x: number of channels supported */
unsigned int nb_pts; /* host1x: number of syncpoints supported */
bool has_common; /* has common registers separate from hypervisor */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
+ struct host1x_table_desc streamid_vm_table; /* VM access table for stream IDs */
+ struct host1x_table_desc classid_vm_table; /* VM access table for class IDs */
+ struct host1x_table_desc mmio_vm_table; /* originator VMID table for engine MMIO accesses */
/*
* On T20-T148, the boot chain may setup DC to increment syncpoints
* 26/27 on VBLANK. As such we cannot use these syncpoints until