Skip to content

Commit 7d3f3e3

Browse files
committed
Merge tag 'v6.18.19' into 6.18-main
This is the 6.18.19 stable release
2 parents: 05f37b9 + 4aea1dc · commit 7d3f3e3

337 files changed

Lines changed: 3073 additions & 1604 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@ maintainers:
1010
- Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
1111

1212
description:
13-
SM8650 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
13+
SM8750 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
1414
DPU display controller, DSI and DP interfaces etc.
1515

1616
$ref: /schemas/display/msm/mdss-common.yaml#

Documentation/virt/kvm/api.rst

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -8403,6 +8403,14 @@ KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores
84038403
guest software, for example if it does not
84048404
expose a bochs graphics device (which is
84058405
known to have had a buggy driver).
8406+
8407+
KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM By default, KVM relaxes the consistency
8408+
check for GUEST_IA32_DEBUGCTL in vmcs12
8409+
to allow FREEZE_IN_SMM to be set. When
8410+
this quirk is disabled, KVM requires this
8411+
bit to be cleared. Note that the vmcs02
8412+
bit is still completely controlled by the
8413+
host, regardless of the quirk setting.
84068414
=================================== ============================================
84078415

84088416
7.32 KVM_CAP_MAX_VCPU_ID

Makefile

Lines changed: 6 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,7 @@
11
# SPDX-License-Identifier: GPL-2.0
22
VERSION = 6
33
PATCHLEVEL = 18
4-
SUBLEVEL = 18
4+
SUBLEVEL = 19
55
EXTRAVERSION =
66
NAME = Baby Opossum Posse
77

@@ -473,6 +473,7 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
473473
export rust_common_flags := --edition=2021 \
474474
-Zbinary_dep_depinfo=y \
475475
-Astable_features \
476+
-Aunused_features \
476477
-Dnon_ascii_idents \
477478
-Dunsafe_op_in_unsafe_fn \
478479
-Wmissing_docs \
@@ -1440,13 +1441,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
14401441
$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
14411442
endif
14421443

1443-
PHONY += objtool_clean
1444+
PHONY += objtool_clean objtool_mrproper
14441445

14451446
objtool_O = $(abspath $(objtree))/tools/objtool
14461447

1447-
objtool_clean:
1448+
objtool_clean objtool_mrproper:
14481449
ifneq ($(wildcard $(objtool_O)),)
1449-
$(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) clean
1450+
$(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) $(patsubst objtool_%,%,$@)
14501451
endif
14511452

14521453
tools/: FORCE
@@ -1623,7 +1624,7 @@ PHONY += $(mrproper-dirs) mrproper
16231624
$(mrproper-dirs):
16241625
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
16251626

1626-
mrproper: clean $(mrproper-dirs)
1627+
mrproper: clean objtool_mrproper $(mrproper-dirs)
16271628
$(call cmd,rmfiles)
16281629
@find . $(RCS_FIND_IGNORE) \
16291630
\( -name '*.rmeta' \) \

arch/arm64/include/asm/pgtable-prot.h

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -50,11 +50,11 @@
5050

5151
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
5252

53-
#define _PAGE_KERNEL (PROT_NORMAL)
54-
#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
55-
#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
56-
#define _PAGE_KERNEL_EXEC (PROT_NORMAL & ~PTE_PXN)
57-
#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
53+
#define _PAGE_KERNEL (PROT_NORMAL | PTE_DIRTY)
54+
#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY | PTE_DIRTY)
55+
#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY | PTE_DIRTY)
56+
#define _PAGE_KERNEL_EXEC ((PROT_NORMAL & ~PTE_PXN) | PTE_DIRTY)
57+
#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT | PTE_DIRTY)
5858

5959
#define _PAGE_SHARED (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
6060
#define _PAGE_SHARED_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -516,7 +516,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
516516
granule = kvm_granule_size(level);
517517
cur.start = ALIGN_DOWN(addr, granule);
518518
cur.end = cur.start + granule;
519-
if (!range_included(&cur, range))
519+
if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL)
520520
continue;
521521
*range = cur;
522522
return 0;

arch/arm64/kvm/mmu.c

Lines changed: 5 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1712,14 +1712,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
17121712
}
17131713

17141714
/*
1715-
* Both the canonical IPA and fault IPA must be hugepage-aligned to
1716-
* ensure we find the right PFN and lay down the mapping in the right
1717-
* place.
1715+
* Both the canonical IPA and fault IPA must be aligned to the
1716+
* mapping size to ensure we find the right PFN and lay down the
1717+
* mapping in the right place.
17181718
*/
1719-
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
1720-
fault_ipa &= ~(vma_pagesize - 1);
1721-
ipa &= ~(vma_pagesize - 1);
1722-
}
1719+
fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
1720+
ipa = ALIGN_DOWN(ipa, vma_pagesize);
17231721

17241722
gfn = ipa >> PAGE_SHIFT;
17251723
mte_allowed = kvm_vma_mte_allowed(vma);

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 17 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -140,26 +140,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
140140
goto out_unlock;
141141
}
142142

143-
kvm_for_each_vcpu(i, vcpu, kvm) {
144-
ret = vgic_allocate_private_irqs_locked(vcpu, type);
145-
if (ret)
146-
break;
147-
}
148-
149-
if (ret) {
150-
kvm_for_each_vcpu(i, vcpu, kvm) {
151-
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
152-
kfree(vgic_cpu->private_irqs);
153-
vgic_cpu->private_irqs = NULL;
154-
}
155-
156-
goto out_unlock;
157-
}
158-
159143
kvm->arch.vgic.in_kernel = true;
160144
kvm->arch.vgic.vgic_model = type;
161145
kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
162-
163146
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
164147

165148
aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
@@ -176,6 +159,23 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
176159
kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
177160
kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
178161

162+
kvm_for_each_vcpu(i, vcpu, kvm) {
163+
ret = vgic_allocate_private_irqs_locked(vcpu, type);
164+
if (ret)
165+
break;
166+
}
167+
168+
if (ret) {
169+
kvm_for_each_vcpu(i, vcpu, kvm) {
170+
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
171+
kfree(vgic_cpu->private_irqs);
172+
vgic_cpu->private_irqs = NULL;
173+
}
174+
175+
kvm->arch.vgic.vgic_model = 0;
176+
goto out_unlock;
177+
}
178+
179179
if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
180180
kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
181181

arch/arm64/mm/contpte.c

Lines changed: 49 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -581,6 +581,27 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
581581
}
582582
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
583583

584+
static bool contpte_all_subptes_match_access_flags(pte_t *ptep, pte_t entry)
585+
{
586+
pte_t *cont_ptep = contpte_align_down(ptep);
587+
/*
588+
* PFNs differ per sub-PTE. Match only bits consumed by
589+
* __ptep_set_access_flags(): AF, DIRTY and write permission.
590+
*/
591+
const pteval_t cmp_mask = PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;
592+
pteval_t entry_cmp = pte_val(entry) & cmp_mask;
593+
int i;
594+
595+
for (i = 0; i < CONT_PTES; i++) {
596+
pteval_t pte_cmp = pte_val(__ptep_get(cont_ptep + i)) & cmp_mask;
597+
598+
if (pte_cmp != entry_cmp)
599+
return false;
600+
}
601+
602+
return true;
603+
}
604+
584605
int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
585606
unsigned long addr, pte_t *ptep,
586607
pte_t entry, int dirty)
@@ -590,13 +611,37 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
590611
int i;
591612

592613
/*
593-
* Gather the access/dirty bits for the contiguous range. If nothing has
594-
* changed, its a noop.
614+
* Check whether all sub-PTEs in the CONT block already match the
615+
* requested access flags/write permission, using raw per-PTE values
616+
* rather than the gathered ptep_get() view.
617+
*
618+
* __ptep_set_access_flags() can update AF, dirty and write
619+
* permission, but only to make the mapping more permissive.
620+
*
621+
* ptep_get() gathers AF/dirty state across the whole CONT block,
622+
* which is correct for a CPU with FEAT_HAFDBS. But page-table
623+
* walkers that evaluate each descriptor individually (e.g. a CPU
624+
* without DBM support, or an SMMU without HTTU, or with HA/HD
625+
* disabled in CD.TCR) can keep faulting on the target sub-PTE if
626+
* only a sibling has been updated. Gathering can therefore cause
627+
* false no-ops when only a sibling has been updated:
628+
* - write faults: target still has PTE_RDONLY (needs PTE_RDONLY cleared)
629+
* - read faults: target still lacks PTE_AF
630+
*
631+
* Per Arm ARM (DDI 0487) D8.7.1, any sub-PTE in a CONT range may
632+
* become the effective cached translation, so all entries must have
633+
* consistent attributes. Check the full CONT block before returning
634+
* no-op, and when any sub-PTE mismatches, proceed to update the whole
635+
* range.
595636
*/
596-
orig_pte = pte_mknoncont(ptep_get(ptep));
597-
if (pte_val(orig_pte) == pte_val(entry))
637+
if (contpte_all_subptes_match_access_flags(ptep, entry))
598638
return 0;
599639

640+
/*
641+
* Use raw target pte (not gathered) for write-bit unfold decision.
642+
*/
643+
orig_pte = pte_mknoncont(__ptep_get(ptep));
644+
600645
/*
601646
* We can fix up access/dirty bits without having to unfold the contig
602647
* range. But if the write bit is changing, we must unfold.

arch/arm64/mm/mmap.c

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -91,7 +91,11 @@ pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
9191

9292
/* Short circuit GCS to avoid bloating the table. */
9393
if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
94-
prot = gcs_page_prot;
94+
/* Honour mprotect(PROT_NONE) on shadow stack mappings */
95+
if (vm_flags & VM_ACCESS_FLAGS)
96+
prot = gcs_page_prot;
97+
else
98+
prot = pgprot_val(protection_map[VM_NONE]);
9599
} else {
96100
prot = pgprot_val(protection_map[vm_flags &
97101
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

arch/parisc/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -85,7 +85,7 @@ extern void __update_cache(pte_t pte);
8585
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
8686

8787
/* This is the size of the initially mapped kernel memory */
88-
#if defined(CONFIG_64BIT)
88+
#if defined(CONFIG_64BIT) || defined(CONFIG_KALLSYMS)
8989
#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
9090
#else
9191
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */

0 commit comments

Comments (0)