At runtime, if the realm guest accesses memory which has not yet been
mapped, then KVM needs to either populate the region or fault the
guest. For memory in the lower (protected) region of IPA, a fresh page
is provided to the RMM, which will zero the contents. For memory in
the upper (shared) region of IPA, the memory from the memslot is
mapped into the realm VM as non-secure.
Signed-off-by: Steven Price <steven.price@xxxxxxx>
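---

For reviewers, a rough sketch of how the fault path is expected to pick
between the two new mapping helpers. This is illustrative only: the
wrapper name realm_map_fault() is hypothetical, and using realm->ia_bits
to locate the protected/shared boundary is an assumption on my part, not
something this patch introduces.

static int realm_map_fault(struct realm *realm, unsigned long ipa,
			   kvm_pfn_t pfn, unsigned long size,
			   struct kvm_mmu_memory_cache *memcache)
{
	/*
	 * Hypothetical dispatch: the lower half of the IPA space is
	 * protected (pages are delegated to the RMM, which zeroes
	 * them); the upper half is shared with the host and is
	 * mapped non-secure.
	 */
	if (ipa < BIT(realm->ia_bits - 1))
		return realm_map_protected(realm, ipa, pfn, size,
					   memcache);

	return realm_map_non_secure(realm, ipa, pfn, size, memcache);
}

The protected path must populate memory via the RMM (delegate the
granule, then create a data granule), while the shared path programs the
RTTs directly with a non-secure descriptor.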
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index f6af3ea6ea8a..b6959cd17a6c 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -714,6 +714,186 @@ static int realm_create_protected_data_page(struct realm *realm,
 	return -ENXIO;
 }
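+
+/*
+ * Fold a fully-populated RTT into a block mapping in its parent table
+ * and return the now-unused RTT granule to the host.
+ */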
+static int fold_rtt(struct realm *realm, unsigned long addr, int level)
+{
+	phys_addr_t rtt_addr;
+	int ret;
+
+	ret = realm_rtt_fold(realm, addr, level, &rtt_addr);
+	if (ret)
+		return ret;
+
+	free_delegated_granule(rtt_addr);
+
+	return 0;
+}
+
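+/*
+ * Map a range of protected (lower half) IPA space: delegate each page
+ * to the RMM and create a data granule with RMM-zeroed contents,
+ * folding to a level-2 block mapping where possible.
+ */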
+int realm_map_protected(struct realm *realm,
+			unsigned long ipa,
+			kvm_pfn_t pfn,
+			unsigned long map_size,
+			struct kvm_mmu_memory_cache *memcache)
+{
+	phys_addr_t phys = __pfn_to_phys(pfn);
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	unsigned long base_ipa = ipa;
+	unsigned long size;
+	int map_level;
+	int ret = 0;
+
+	if (WARN_ON(!IS_ALIGNED(map_size, RMM_PAGE_SIZE)))
+		return -EINVAL;
+
+	if (WARN_ON(!IS_ALIGNED(ipa, map_size)))
+		return -EINVAL;
+
+	if (IS_ALIGNED(map_size, RMM_L2_BLOCK_SIZE))
+		map_level = 2;
+	else
+		map_level = 3;
+
+	if (map_level < RMM_RTT_MAX_LEVEL) {
+		/*
+		 * A temporary RTT is needed during the map; pre-create it.
+		 * If that fails (e.g. due to missing parent tables), the
+		 * error is handled by the loop below.
+		 */
+		realm_create_rtt_levels(realm, ipa, map_level,
+					RMM_RTT_MAX_LEVEL, memcache);
+	}
+
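+	/* Delegate and map one RMM granule (page) at a time */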
+	for (size = 0; size < map_size; size += RMM_PAGE_SIZE) {
+		if (rmi_granule_delegate(phys)) {
+			/*
+			 * It's likely we raced with another VCPU on the same
+			 * fault. Assume the other VCPU has handled the fault
+			 * and return to the guest.
+			 */
+			return 0;
+		}
+
+		ret = rmi_data_create_unknown(rd, phys, ipa);
+
+		if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+			/* Create missing RTTs and retry */
+			int level = RMI_RETURN_INDEX(ret);
+
+			WARN_ON(level == RMM_RTT_MAX_LEVEL);
+
+			ret = realm_create_rtt_levels(realm, ipa, level,
+						      RMM_RTT_MAX_LEVEL,
+						      memcache);
+			if (ret)
+				goto err_undelegate;
+
+			ret = rmi_data_create_unknown(rd, phys, ipa);
+		}
+
+		if (WARN_ON(ret))
+			goto err_undelegate;
+
+		phys += RMM_PAGE_SIZE;
+		ipa += RMM_PAGE_SIZE;
+	}
+
+	if (map_size == RMM_L2_BLOCK_SIZE) {
+		ret = fold_rtt(realm, base_ipa, map_level + 1);
+		if (WARN_ON(ret))
+			goto err;
+	}
+
+	return 0;
+
+err_undelegate:
+	if (WARN_ON(rmi_granule_undelegate(phys))) {
+		/* Page can't be returned to NS world so is lost */
+		get_page(phys_to_page(phys));
+	}
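+	/*
+	 * Unwind the loop: destroy each data granule created so far and
+	 * undelegate the underlying page so it returns to the NS world.
+	 */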
+err:
+	while (size > 0) {
+		unsigned long data, top;
+
+		phys -= RMM_PAGE_SIZE;
+		size -= RMM_PAGE_SIZE;
+		ipa -= RMM_PAGE_SIZE;
+
+		WARN_ON(rmi_data_destroy(rd, ipa, &data, &top));
+
+		if (WARN_ON(rmi_granule_undelegate(phys))) {
+			/* Page can't be returned to NS world so is lost */
+			get_page(phys_to_page(phys));
+		}
+	}
+	return -ENXIO;
+}
+
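+/*
+ * Map memory from the memslot into the upper (shared) half of the
+ * realm's IPA space as an unprotected mapping, using level-2 block
+ * mappings where the size and alignment allow.
+ */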
+int realm_map_non_secure(struct realm *realm,
+			 unsigned long ipa,
+			 kvm_pfn_t pfn,
+			 unsigned long size,
+			 struct kvm_mmu_memory_cache *memcache)
+{
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	phys_addr_t phys = __pfn_to_phys(pfn);
+	unsigned long offset;
+	int map_size, map_level;
+	int ret = 0;
+
+	if (WARN_ON(!IS_ALIGNED(size, RMM_PAGE_SIZE)))
+		return -EINVAL;
+
+	if (WARN_ON(!IS_ALIGNED(ipa, size)))
+		return -EINVAL;
+
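+	/* ipa is aligned to size, so block map whenever the size allows */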
+	if (IS_ALIGNED(size, RMM_L2_BLOCK_SIZE)) {
+		map_level = 2;
+		map_size = RMM_L2_BLOCK_SIZE;
+	} else {
+		map_level = 3;
+		map_size = RMM_PAGE_SIZE;
+	}
+
+	for (offset = 0; offset < size; offset += map_size) {
+		/*
+		 * realm_map_ipa() enforces that the memory is writable,
+		 * so for now we permit both read and write.
+		 */
+		unsigned long desc = phys |
+				     PTE_S2_MEMATTR(MT_S2_FWB_NORMAL) |
+				     KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R |
+				     KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
+		ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc);
+
+		if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+			/* Create missing RTTs and retry */
+			int level = RMI_RETURN_INDEX(ret);
+
+			ret = realm_create_rtt_levels(realm, ipa, level,
+						      map_level, memcache);
+			if (ret)
+				return -ENXIO;
+
+			ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc);
+		}
+		/*
+		 * RMI_ERROR_RTT can be reported for two reasons: either the
+		 * RTT tables are not there, or there is an RTTE already
+		 * present for the address. The call to
+		 * realm_create_rtt_levels() above handles the first case, and
+		 * in the second case this indicates that another thread has
+		 * already populated the RTTE for us, so we can ignore the
+		 * error and continue.
+		 */
+		if (ret && RMI_RETURN_STATUS(ret) != RMI_ERROR_RTT)
+			return -ENXIO;
+
+		ipa += map_size;
+		phys += map_size;
+	}
+
+	return 0;
+}
+
 static int populate_region(struct kvm *kvm,
 			   phys_addr_t ipa_base,
 			   phys_addr_t ipa_end,