Commit ba738b9215 in the QEMU repository (qemu.org)
commit ba738b92156e1e03e5800d33e86fb1fe3a7d4258
Author: Richard Henderson <richard.henderson@linaro.org>
Date: Thu Feb 26 11:27:19 2026 +0000
target/arm: Add vq argument to kvm_arch_{get, put}_sve
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20260216034432.23912-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index b8aba20f02..ecb732a669 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -2118,16 +2118,15 @@ static int kvm_arch_put_fpsimd(CPUState *cs)
* code the slice index to zero for now as it's unlikely we'll need more than
* one slice for quite some time.
*/
-static int kvm_arch_put_sve(CPUState *cs)
+static int kvm_arch_put_sve(CPUState *cs, uint32_t vq)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
+ CPUARMState *env = cpu_env(cs);
uint64_t tmp[ARM_MAX_VQ * 2];
uint64_t *r;
int n, ret;
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
- r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
+ r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], vq * 2);
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
if (ret) {
return ret;
@@ -2135,8 +2134,7 @@ static int kvm_arch_put_sve(CPUState *cs)
}
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
- r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
- DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0], DIV_ROUND_UP(vq * 2, 8));
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
if (ret) {
return ret;
@@ -2144,7 +2142,7 @@ static int kvm_arch_put_sve(CPUState *cs)
}
r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
- DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ DIV_ROUND_UP(vq * 2, 8));
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
@@ -2236,7 +2234,7 @@ int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp)
}
if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = kvm_arch_put_sve(cs);
+ ret = kvm_arch_put_sve(cs, cpu->sve_max_vq);
} else {
ret = kvm_arch_put_fpsimd(cs);
}
@@ -2302,10 +2300,9 @@ static int kvm_arch_get_fpsimd(CPUState *cs)
* code the slice index to zero for now as it's unlikely we'll need more than
* one slice for quite some time.
*/
-static int kvm_arch_get_sve(CPUState *cs)
+static int kvm_arch_get_sve(CPUState *cs, uint32_t vq)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
+ CPUARMState *env = cpu_env(cs);
uint64_t *r;
int n, ret;
@@ -2315,7 +2312,7 @@ static int kvm_arch_get_sve(CPUState *cs)
if (ret) {
return ret;
}
- sve_bswap64(r, r, cpu->sve_max_vq * 2);
+ sve_bswap64(r, r, vq * 2);
}
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
@@ -2324,7 +2321,7 @@ static int kvm_arch_get_sve(CPUState *cs)
if (ret) {
return ret;
}
- sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ sve_bswap64(r, r, DIV_ROUND_UP(vq * 2, 8));
}
r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
@@ -2332,7 +2329,7 @@ static int kvm_arch_get_sve(CPUState *cs)
if (ret) {
return ret;
}
- sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ sve_bswap64(r, r, DIV_ROUND_UP(vq * 2, 8));
return 0;
}
@@ -2420,7 +2417,7 @@ int kvm_arch_get_registers(CPUState *cs, Error **errp)
}
if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = kvm_arch_get_sve(cs);
+ ret = kvm_arch_get_sve(cs, cpu->sve_max_vq);
} else {
ret = kvm_arch_get_fpsimd(cs);
}