// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
#include <asm/access-regs.h>
#include <asm/fault.h>
#include <asm/gmap.h>
#include <asm/dat-bits.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
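
/*
 * Example: a 64-bit virtual address decodes into 11+11+11+11 table index
 * bits plus an 8-bit page index and a 12-bit byte index; for gva 0x1000
 * only px (== 1) is set, all other fields are zero.
 */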

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33;	/* Region-Frame Absolute Address */
	unsigned long sfaa : 44;	/* Segment-Frame Absolute Address */
	unsigned long pfra : 52;	/* Page-Frame Real Address */
};
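
/*
 * The field widths mirror the frame sizes: a 33-bit rfaa leaves a 31-bit
 * offset (2G region frames), a 44-bit sfaa a 20-bit offset (1M segment
 * frames) and a 52-bit pfra a 12-bit offset (4K pages).
 */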

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1;	/* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1;	/* Fetch-Only Bit */
	unsigned long p      : 1;	/* Private Bit */
	unsigned long alesn  : 8;	/* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16;	/* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25;	/* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32;	/* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1;	/* ASX-Invalid Bit */
	unsigned long ato    : 29;	/* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1;	/* Base-Space Bit */
	unsigned long ax     : 16;	/* Authorization Index */
	unsigned long atl    : 12;	/* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1;	/* Controlled-ASN Bit */
	unsigned long ra     : 1;	/* Reusable-ASN Bit */
	unsigned long asce   : 64;	/* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};

int ipte_lock_held(struct kvm *kvm)
{
	if (sclp.has_siif) {
		int rc;

		read_lock(&kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
		read_unlock(&kvm->arch.sca_lock);
		return rc;
	}
	return kvm->arch.ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count++;
	if (kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count--;
	if (kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	wake_up(&kvm->arch.ipte_wq);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_lock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&kvm->arch.ipte_wq);
}
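
/*
 * Two IPTE locking schemes are provided: with the SIIF facility the lock
 * lives in the SCA's ipte control word itself (kh acts as a nesting
 * counter), otherwise a mutex-protected per-VM counter is used and only
 * the k bit is mirrored into the ipte control.
 */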

void ipte_lock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_lock_siif(kvm);
	else
		ipte_lock_simple(kvm);
}

void ipte_unlock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_unlock_siif(kvm);
	else
		ipte_unlock_simple(kvm);
}
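
/*
 * Access-register translation: resolve the ALET found in the given access
 * register to an ASCE by walking the access-list and ASN-second-table
 * entries, validating sequence numbers and, for private entries, the
 * extended authorization index.
 */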

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	if (vcpu->arch.acrs_loaded)
		save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
	PROT_NONE,
};
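
/*
 * Set up vcpu->arch.pgm for an access exception. For protection
 * exceptions the TEID bits encode the cause, roughly: b56 for
 * low-address protection, b60 for key-controlled protection, b61 for
 * DAT protection, with ALC and IEP marked by combinations. A terminated
 * store clears these bits, leaving the cause unpredictable.
 */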

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
			    enum gacc_mode mode, enum prot_type prot, bool terminate)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	union teid *teid;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	teid = (union teid *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_NONE:
			/* We should never get here, acts like termination */
			WARN_ON_ONCE(1);
			break;
		case PROT_TYPE_IEP:
			teid->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			teid->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			teid->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			teid->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			teid->b61 = 1;
			break;
		}
		if (terminate) {
			teid->b56 = 0;
			teid->b60 = 0;
			teid->b61 = 0;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		teid->addr = gva >> PAGE_SHIFT;
		teid->fsi = mode == GACC_STORE ? TEID_FSI_STORE : TEID_FSI_FETCH;
		teid->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
		     enum gacc_mode mode, enum prot_type prot)
{
	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
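
/*
 * Sketch of a full walk with a region-first ASCE: rfx indexes the
 * region-first table, then rsx, rtx and sx index the lower levels down
 * to the page table, where px selects the final PTE. With EDAT-1/EDAT-2
 * the walk can stop early at a 1M segment or 2G region frame.
 */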

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.rsto * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
	fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
	fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
	fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
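
/*
 * The single mask works because 0x11ff == 0x1ff | 0x1000: the values
 * whose bits all lie within that mask are exactly 0..511 and 4096..4607.
 */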

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}

static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}
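
/*
 * Key checking precedence as implemented below: access key 0 always
 * matches; a matching access-control value allows the access; fetches
 * are allowed when fetch protection is off, or when the fetch protection
 * override covers the first 2K of address space; finally, the storage
 * protection override allows accesses to pages with access control 9.
 */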

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}

/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa makes up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 * When deriving the boundaries of a fragment from a gpa, all but the last
 * fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
				rc = PGM_ADDRESSING;
				prot = PROT_NONE;
			}
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}

static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}
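
/*
 * The *_user_key uaccess variants below perform the copy with the given
 * access key, so storage-key protection is checked by the hardware
 * during the access itself rather than up front.
 */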

static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that can't occur
	 * (they're unsupported). Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return 0;
}

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu->kvm);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access; we only need to take care to
	 * forego key protection checking if fetch protection override applies or
	 * retry with the special key 9 in case of storage protection override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0) {
		bool terminate = (mode == GACC_STORE) && (idx > 0);

		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		else
			prot = PROT_NONE;
		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
	}
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu->kvm);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}
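
/*
 * Real addresses bypass DAT: each fragment is only put through prefixing
 * via kvm_s390_real_to_abs() and accessed without key checking.
 */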

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	return rc;
}

/**
 * cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
 * @kvm: Virtual machine instance.
 * @gpa: Absolute guest address of the location to be changed.
 * @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
 *       non power of two will result in failure.
 * @old_addr: Pointer to old value. If the location at @gpa contains this value,
 *            the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
 *            *@old_addr contains the value at @gpa before the attempt to
 *            exchange the value.
 * @new: The value to place at @gpa.
 * @access_key: The access key to use for the guest access.
 * @success: output value indicating if an exchange occurred.
 *
 * Atomically exchange the value at @gpa by @new, if it contains *@old_addr.
 * Honors storage keys.
 *
 * Return: * 0: successful exchange
 *         * >0: a program interruption code indicating the reason cmpxchg could
 *               not be attempted
 *         * -EINVAL: address misaligned or len not power of two
 *         * -EAGAIN: transient failure (len 1 or 2)
 *         * -EOPNOTSUPP: read-only memslot (should never occur)
 */
int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
			       __uint128_t *old_addr, __uint128_t new,
			       u8 access_key, bool *success)
{
	gfn_t gfn = gpa_to_gfn(gpa);
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	bool writable;
	hva_t hva;
	int ret;

	if (!IS_ALIGNED(gpa, len))
		return -EINVAL;

	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable)
		return -EOPNOTSUPP;

	hva += offset_in_page(gpa);
	/*
	 * The cmpxchg_user_key macro depends on the type of "old", so we need
	 * a case for each valid length and get some code duplication as long
	 * as we don't introduce a new macro.
	 */
	switch (len) {
	case 1: {
		u8 old;

		ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 2: {
		u16 old;

		ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 4: {
		u32 old;

		ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 8: {
		u64 old;

		ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 16: {
		__uint128_t old;

		ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	default:
		return -EINVAL;
	}
	if (*success)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	/*
	 * Assume that the fault is caused by protection, either key protection
	 * or user page write protection.
	 */
	if (ret == -EFAULT)
		ret = PGM_PROTECTION;
	return ret;
}
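
/*
 * Usage sketch for a hypothetical caller: attempt to replace an aligned
 * 8-byte word; on return, "success" tells whether the exchange happened
 * and "old" holds the value that was found at @gpa:
 *
 *	__uint128_t old = expected;
 *	bool success;
 *	int rc = cmpxchg_guest_abs_with_key(kvm, gpa, 8, &old, new_val,
 *					    access_key, &success);
 */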

/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu->kvm);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu->kvm);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}
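
/*
 * The shadow-table functions below back VSIE (nested SIE): guest-3
 * addresses are translated through the guest-2 DAT tables and mirrored
 * into shadow gmap tables that the hardware can walk directly.
 */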

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct kvm *kvm;
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	kvm = sg->private;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.rsto * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r1_entry++;
	}
	fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r2_entry++;
	}
	fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r3_entry++;
	}
	fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_sg_entry++;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu->kvm);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	vcpu->kvm->stat.gmap_shadow_pg_entry++;
	ipte_unlock(vcpu->kvm);
	mmap_read_unlock(sg->mm);
	return rc;
}