diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 09fde8c5b6188c5686d2588e09202973c9897cc3..ab6cbb944b36566129515cf4aa0ad92b08f0c284 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1657,7 +1657,10 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 + depends on !CC_IS_GCC + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Build the kernel with Branch Target Identification annotations diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index e6c1c94c8d69c5e4be895b4d91d7ec38c3c37f22..07737b65d7a3d9de28fbb61e0b09d34182426458 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -87,3 +87,8 @@ h1_int_od_l: h1-int-od-l { }; }; }; + +&wlan_host_wake_l { + /* Kevin has an external pull up, but Bob does not. */ + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index 1384dabbdf4067b38af92ccf017f6afc9c6fec11..739937f70f8d0a7d0eb43b304c5f2c52fa268180 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -237,6 +237,14 @@ &cdn_dp { &edp { status = "okay"; + /* + * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only + * set this here, because rk3399-gru.dtsi ensures we can generate this + * off GPLL=600MHz, whereas some other RK3399 boards may not. 
+ */ + assigned-clocks = <&cru PCLK_EDP>; + assigned-clock-rates = <24000000>; + ports { edp_out: port@1 { reg = <1>; @@ -395,6 +403,7 @@ wifi_perst_l: wifi-perst-l { }; wlan_host_wake_l: wlan-host-wake-l { + /* Kevin has an external pull up, but Bob does not */ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 544110aaffc569b183a61820a7fcd895f4a29a46..95bc7a5f61dd5b50c2b8614569f24fce0b3ca9fc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -102,7 +102,6 @@ vcc3v3_sys: vcc3v3-sys { vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; - enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bc6841867b5122eef4ac3c49e21338b840ed5367..529c123cf0a47689a47b595c4ea0f282266e7fff 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + regs->cause = -1UL; + return regs->a0; badframe: diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 37ba199a9bef4a34fa4175541f0f977b26d5db60..af4b4d3c6ff6462ccf9faa3fdd8d11b745c154c3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1278,6 +1278,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7397cc449e2fc4ceb5fa98ff02fe4f4822b2cefb..c2b34998c27dffc53512cd77b9a65afbee290384 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f3b7a6a82b0767e6db3fb04744b1708dacfb7168..b55af48f5419d8744e9832ecaae3d772c4086de5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4327,6 +4327,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .can_emulate_instruction = svm_can_emulate_instruction, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 10aba1dd264ed9ac8a6e3d4725e06015622bdc05..f62d13fc6e01fff0fef7ba65b4d10f4392ad9abc 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 
6aaffe27293737f8625738c7cf33c3ff5dc6375c..8854df5ed7350880e7d9d8fae12b53a107e1977c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8890,6 +8890,12 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (kvm_x86_ops.guest_memory_reclaimed) + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 8563a392f30bfcfdbef84869736f64ed26c46808..dadab2feca080140db250a20ed9a2f9e8933602c 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) } pdev = of_find_device_by_node(udma_node); + if (np != udma_node) + of_node_put(udma_node); + if (!pdev) { pr_debug("UDMA device not found\n"); return ERR_PTR(-EPROBE_DEFER); } - if (np != udma_node) - of_node_put(udma_node); - ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 5efc524b14bef535e274f5cf93bde867e3f1036c..a2be3a71bcf8e88a9e47269052b50ac56ef10023 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; /* SHIM variables */ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; -static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; +static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; /* * Determine whether we're in secure boot mode. @@ -53,8 +53,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* * See if a user has put the shim into insecure mode. If so, and if the - * variable doesn't have the runtime attribute set, we might as well - * honor that. + * variable doesn't have the non-volatile attribute set, we might as + * well honor that. */ size = sizeof(moksbstate); status = get_efi_var(shim_MokSBState_name, &shim_guid, @@ -63,7 +63,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* If it fails, we don't care why. Default to secure */ if (status != EFI_SUCCESS) goto secure_boot_enabled; - if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) return efi_secureboot_mode_disabled; secure_boot_enabled: diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 3672539cb96eb3043d66adb12122d0502e2ba554..5d0f1b1966fc6e48958b7f128da4e9b9307a85cd 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; + /* + * Disregard any setup data that was provided by the bootloader: + * setup_data could be pointing anywhere, and we have no way of + * authenticating or validating the payload. 
+ */ + hdr->setup_data = 0; + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 780cba4e30d0e7badfbb6f025855e871b2142ce0..876027fdefc9543e4835839cbe2939a8671c8918 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -604,9 +604,9 @@ static int __init gpio_mockup_init(void) static void __exit gpio_mockup_exit(void) { + gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); platform_driver_unregister(&gpio_mockup_driver); - gpio_mockup_unregister_pdevs(); } module_init(gpio_mockup_init); diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 2613881a66e662beccd0b8b51f98133ea52114ca..381cfa26a4a1a0881a56616431af1c1ebbee4a5f 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -1769,7 +1769,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) ret = -ENODEV; goto out_free_le; } - le->irq = irq; if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? @@ -1783,7 +1782,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) init_waitqueue_head(&le->wait); /* Request a thread to read the events */ - ret = request_threaded_irq(le->irq, + ret = request_threaded_irq(irq, lineevent_irq_handler, lineevent_irq_thread, irqflags, @@ -1792,6 +1791,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_le; + le->irq = irq; + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = fd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f262c4e7a48a24809bacde91e95aeaf0bcdc4df4..f44ab44abd64961d32868cbfa0ba7b32f609dc03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2047,6 +2047,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); return r; } + + /*get pf2vf msg info at it's earliest time*/ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + } } @@ -2174,8 +2179,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } adev->ip_blocks[i].status.sw = true; - /* need to do gmc hw init early so we can allocate gpu mem */ - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { + /* need to do common hw init early so everything is set up for gmc */ + r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + if (r) { + DRM_ERROR("hw_init %d failed %d\n", i, r); + goto init_failed; + } + adev->ip_blocks[i].status.hw = true; + } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + /* need to do gmc hw init early so we can allocate gpu mem */ + /* Try to reserve bad pages early */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_exchange_data(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); @@ -2207,7 +2224,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) - amdgpu_virt_init_data_exchange(adev); + amdgpu_virt_exchange_data(adev); r = amdgpu_ib_pool_init(adev); if (r) { @@ -2753,8 +2770,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) int i, r; static enum amd_ip_block_type ip_order[] = { - AMD_IP_BLOCK_TYPE_GMC, 
AMD_IP_BLOCK_TYPE_COMMON, + AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_IH, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e7678ba8fdcf8e0b7cd8cc4fb4921f260dbf2ef7..5217eadd72140b4b5c170219a9f32e474fb76f2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -580,17 +580,35 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { - uint64_t bp_block_offset = 0; - uint32_t bp_block_size = 0; - struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; - if (adev->mman.fw_vram_usage_va != NULL) { + if (adev->bios != NULL) { adev->virt.vf2pf_update_interval_ms = 2000; + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } + + if (adev->virt.vf2pf_update_interval_ms != 0) { + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } +} + + +void amdgpu_virt_exchange_data(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; + + if (adev->mman.fw_vram_usage_va != NULL) { + adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); @@ -615,14 +633,16 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) if (adev->virt.ras_init_done) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } - } + } else if (adev->bios != NULL) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); + amdgpu_virt_read_pf2vf_data(adev); } } + void amdgpu_detect_virtualization(struct amdgpu_device *adev) { uint32_t reg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8dd624c20f895ed8f5d57c17e85eb06e92a5fe0d..77b9d37bfa1b27a0689484d9861ca0232bacb76b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -271,6 +271,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 1f2e2460e121ee134a72d423baeea06184474750..a1a8e026b9fa60787da29be341e9c4c7fa6cf72c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1475,6 +1475,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_CNTL, temp); if 
(!amdgpu_sriov_vf(adev)) { + ring = &adev->sdma.instance[i].ring; + adev->nbio.funcs->sdma_doorbell_range(adev, i, + ring->use_doorbell, ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); + /* unhalt engine */ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7212b9900e0abafdf8386235789ec6f25167384d..abd649285a22d8f9bcde9a2047711901989b2d71 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1332,25 +1332,6 @@ static int soc15_common_sw_fini(void *handle) return 0; } -static void soc15_doorbell_range_init(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_ring *ring; - - /* sdma/ih doorbell range are programed by hypervisor */ - if (!amdgpu_sriov_vf(adev)) { - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - } - - adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, - adev->irq.ih.doorbell_index); - } -} - static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1370,12 +1351,6 @@ static int soc15_common_hw_init(void *handle) /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); - /* HW doorbell routing policy: doorbell writing not - * in SDMA/IH/MM/ACV range will be routed to CP. So - * we need to init SDMA/IH/MM/ACV doorbell range prior - * to CP ip block init and ring test. - */ - soc15_doorbell_range_init(adev); return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 7d37d2a01e3cf924743d19768264d75f0c635825..b8c1a3c1c5170e39eb0888f0ab7730d11610839e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -791,10 +791,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { .attach = mtk_dsi_bridge_attach, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_enable = mtk_dsi_bridge_atomic_enable, .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, .mode_set = mtk_dsi_bridge_mode_set, }; diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index f6fae64861ce873bca8105e2dcc1038b81e6901c..27cc5f03611cbe91e0bf50a903296829ecf81782 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } + + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; 
@@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); - return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index c76b2c7f9b106dad1a2807bacbbbf939531de873..b936196c229c805223dd2ca264913f42202bde76 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -627,7 +627,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index cc558fec74e38780de5d42b75ca412cee7a9bb7b..40820043c8d368748ae63cb795dfe9dd66a3e6d1 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -643,7 +643,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8250", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 93c60712a948ec0c5f15fa1a979340fb94147b9e..c48cf737b521d726df669d370e3ee44a2a9854e9 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -569,7 +569,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) { unsigned long fl_sagaw, sl_sagaw; - fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); + fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); sl_sagaw = cap_sagaw(iommu->cap); /* Second level only. */ diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index a2563c25408086d013c4a7a9306d913343aa1eda..2299d5cca8ffb6d418b88c2e6bfdc366dc3c31ef 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -512,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; - if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) + if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) return -ENODEV; switch (fc_usb->udev->speed) { diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 0b09cdaaeb6c1e0ce2ea503073a0382f8e65d370..899768ed1688d51b4020cae2f57087f46c3bc243 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -932,15 +932,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* Erase init depends on CSD and SSR */ mmc_init_erase(card); - - /* - * Fetch switch information from card. - */ - err = mmc_read_switch(card); - if (err) - return err; } + /* + * Fetch switch information from card. Note, sd3_bus_mode can change if + * voltage switch outcome changes, so do this always. + */ + err = mmc_read_switch(card); + if (err) + return err; + /* * For SPI, enable CRC as appropriate. * This CRC enable is located AFTER the reading of the @@ -1089,26 +1090,15 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && mmc_sd_card_using_v18(card) && host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { - /* - * Re-read switch information in case it has changed since - * oldcard was initialized. 
- */ - if (oldcard) { - err = mmc_read_switch(card); - if (err) - goto free_card; - } - if (mmc_sd_card_using_v18(card)) { - if (mmc_host_set_uhs_voltage(host) || - mmc_sd_init_uhs_card(card)) { - v18_fixup_failed = true; - mmc_power_cycle(host, ocr); - if (!oldcard) - mmc_remove_card(card); - goto retry; - } - goto cont; + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; } + goto cont; } /* Initialization sequence for UHS-I cards */ diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 7cbaac238ff62da1bf1c41a470cc1da73768c75b..429950241de326a3113aabcd91aa0e95ba0de9f5 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -954,11 +954,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, u32 reg_ctrl, reg_id, reg_iflag1; int i; - if (unlikely(drop)) { - skb = ERR_PTR(-ENOBUFS); - goto mark_as_read; - } - mb = flexcan_get_mb(priv, n); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { @@ -987,6 +982,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, reg_ctrl = priv->read(&mb->can_ctrl); } + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 99983f7a0ce0bd7d04707fc436d175edf6ace927..8f6269e9f6a7c4d6013ba065559b4d2323f6dfc6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; + /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? 
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8801d093135c3e72ca22643a8fbc7bf896727e4b..a33149ee0ddcfc79fbe1ca8d73ad66b50f96912f 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) static int ipvlan_process_outbound(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; /* The ipvlan is a pseudo-L2 device, so the packets that we receive @@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) if (skb_mac_header_was_set(skb)) { /* In this mode we dont care about * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", @@ -590,7 +591,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb); } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1465a92ea3fc9008bf67efff11a016455f5f7fe4..b26617026e831d41aa8070628a0fc27abf2745b7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -950,7 +950,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) offset %= 32; val = mt76_rr(dev, addr); - val >>= (tid % 32); + val >>= offset; if (offset > 20) { addr += 4; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 602065bfc9bb8766e9c16038dd10fabc90f5ae89..b7872ad3e76225c8493878461f3322fd981b06df 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -295,20 +295,16 @@ static int atmel_config_rs485(struct uart_port *port, mode = atmel_uart_readl(port, ATMEL_US_MR); - /* Resetting serial mode to RS232 (0x0) */ - mode &= ~ATMEL_US_USMODE; - - port->rs485 = *rs485conf; - if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (port->rs485.flags & SER_RS485_RX_DURING_TX) + if (rs485conf->flags & SER_RS485_RX_DURING_TX) atmel_port->tx_done_mask = ATMEL_US_TXRDY; else atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; atmel_uart_writel(port, ATMEL_US_TTGR, rs485conf->delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index c6fc14b169dacb79f5d6e3274162a2d136ed1822..e3a8b6c71aa1ddd5b71b49d59a5584ef0564a33b 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1531,7 +1531,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, TRB_LEN(le32_to_cpu(trb->length)); if (priv_req->num_of_trb > 1 && - le32_to_cpu(trb->control) & TRB_SMM) + le32_to_cpu(trb->control) & TRB_SMM && + le32_to_cpu(trb->control) & TRB_CHAIN) transfer_end = true; 
cdns3_ep_inc_deq(priv_ep); @@ -1691,6 +1692,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) ep_cfg &= ~EP_CFG_ENABLE; writel(ep_cfg, &priv_dev->regs->ep_cfg); priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; + priv_ep->flags |= EP_UPDATE_EP_TRBADDR; } cdns3_transfer_completed(priv_dev, priv_ep); } else if (!(priv_ep->flags & EP_STALLED) && diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 53b3d77fba6a251b6a7ec3cbbba8d0dbf87557b8..f2a3c0b5b535db3bf62528f70fc90cc2ce8a3af6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5968,7 +5968,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) * * Return: The same as for usb_reset_and_verify_device(). * However, if a reset is already in progress (for instance, if a - * driver doesn't have pre_ or post_reset() callbacks, and while + * driver doesn't have pre_reset() or post_reset() callbacks, and while * being unbound or re-bound during the ongoing reset its disconnect() * or probe() routine tries to perform a second, nested reset), the * routine returns -EINPROGRESS. diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5aae7504f78a149315bd6c8a324769882008bf1f..4a0eec17651186c24d3e27115eb51769ef13546b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -114,8 +114,6 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc->current_dr_role = mode; } -static int dwc3_core_soft_reset(struct dwc3 *dwc); - static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); @@ -265,7 +263,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) * dwc3_core_soft_reset - Issues core soft reset and PHY reset * @dwc: pointer to our context structure */ -static int dwc3_core_soft_reset(struct dwc3 *dwc) +int dwc3_core_soft_reset(struct dwc3 *dwc) { u32 reg; int retries = 1000; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 79e1b82e5e057cf793da4ba843bd9c32ec581880..cbebe541f7e8fe9a37990c56bf3fc45f3bf6342b 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1010,6 +1010,7 @@ struct dwc3_scratchpad_array { * @tx_max_burst_prd: max periodic ESS transmit burst size * @hsphy_interface: "utmi" or "ulpi" * @connected: true when we're connected to a host, false otherwise + * @softconnect: true when gadget connect is called, false when disconnect runs * @delayed_status: true when gadget driver asks for delayed status * @ep0_bounced: true when we used bounce buffer * @ep0_expect_in: true when we expect a DATA IN transfer @@ -1218,6 +1219,7 @@ struct dwc3 { const char *hsphy_interface; unsigned connected:1; + unsigned softconnect:1; unsigned delayed_status:1; unsigned ep0_bounced:1; unsigned ep0_expect_in:1; @@ -1456,6 +1458,8 @@ bool dwc3_has_imod(struct dwc3 *dwc); int dwc3_event_buffers_setup(struct dwc3 *dwc); void dwc3_event_buffers_cleanup(struct dwc3 *dwc); +int dwc3_core_soft_reset(struct dwc3 *dwc); + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a2a10c05ef3fb048c41534b4f73d7acf589d9639..41ed2f6f8a8d019d1c68b5b62788dfb63c8a3358 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2120,14 +2120,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc); static void __dwc3_gadget_stop(struct dwc3 *dwc); static int __dwc3_gadget_start(struct dwc3 *dwc); +static int 
dwc3_gadget_soft_disconnect(struct dwc3 *dwc) +{ + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + dwc->connected = false; + + /* + * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a + * Section 4.1.8 Table 4-7, it states that for a device-initiated + * disconnect, the SW needs to ensure that it sends "a DEPENDXFER + * command for any active transfers" before clearing the RunStop + * bit. + */ + dwc3_stop_active_transfers(dwc); + __dwc3_gadget_stop(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + + /* + * Note: if the GEVNTCOUNT indicates events in the event buffer, the + * driver needs to acknowledge them before the controller can halt. + * Simply let the interrupt handler acknowledges and handle the + * remaining event generated by the controller while polling for + * DSTS.DEVCTLHLT. + */ + return dwc3_gadget_run_stop(dwc, false, false); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); - unsigned long flags; int ret; is_on = !!is_on; + dwc->softconnect = is_on; /* * Per databook, when we want to stop the gadget, if a control transfer * is still in process, complete it and get the core into setup phase. @@ -2163,50 +2191,27 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) return 0; } - /* - * Synchronize and disable any further event handling while controller - * is being enabled/disabled. - */ - disable_irq(dwc->irq_gadget); - - spin_lock_irqsave(&dwc->lock, flags); + if (dwc->pullups_connected == is_on) { + pm_runtime_put(dwc->dev); + return 0; + } if (!is_on) { - u32 count; - - dwc->connected = false; + ret = dwc3_gadget_soft_disconnect(dwc); + } else { /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 4.1.8 Table 4-7, it states that for a device-initiated - * disconnect, the SW needs to ensure that it sends "a DEPENDXFER - * command for any active transfers" before clearing the RunStop - * bit. + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. */ - dwc3_stop_active_transfers(dwc); - __dwc3_gadget_stop(dwc); + dwc3_core_soft_reset(dwc); - /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the - * "software needs to acknowledge the events that are generated - * (by writing to GEVNTCOUNTn) while it is waiting for this bit - * to be set to '1'." 
- */ - count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); - count &= DWC3_GEVNTCOUNT_MASK; - if (count > 0) { - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); - dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % - dwc->ev_buf->length; - } - } else { + dwc3_event_buffers_setup(dwc); __dwc3_gadget_start(dwc); + ret = dwc3_gadget_run_stop(dwc, true, false); } - ret = dwc3_gadget_run_stop(dwc, is_on, false); - spin_unlock_irqrestore(&dwc->lock, flags); - enable_irq(dwc->irq_gadget); - pm_runtime_put(dwc->dev); return ret; @@ -4048,7 +4053,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc) { int ret; - if (!dwc->gadget_driver) + if (!dwc->gadget_driver || !dwc->softconnect) return 0; ret = __dwc3_gadget_start(dwc); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 8950d1f10a7fb82d866f2d96d0f1a3f67ff1caf5..4a7b200674eacb457a52a3c8aedc20092065995b 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -25,6 +25,13 @@ */ #define TT_MICROFRAMES_MAX 9 +/* schedule error type */ +#define ESCH_SS_Y6 1001 +#define ESCH_SS_OVERLAP 1002 +#define ESCH_CS_OVERFLOW 1003 +#define ESCH_BW_OVERFLOW 1004 +#define ESCH_FIXME 1005 + /* mtk scheduler bitmasks */ #define EP_BPKTS(p) ((p) & 0x7f) #define EP_BCSCOUNT(p) (((p) & 0x7) << 8) @@ -32,6 +39,24 @@ #define EP_BOFFSET(p) ((p) & 0x3fff) #define EP_BREPEAT(p) (((p) & 0x7fff) << 16) +static char *sch_error_string(int err_num) +{ + switch (err_num) { + case ESCH_SS_Y6: + return "Can't schedule Start-Split in Y6"; + case ESCH_SS_OVERLAP: + return "Can't find a suitable Start-Split location"; + case ESCH_CS_OVERFLOW: + return "The last Complete-Split is greater than 7"; + case ESCH_BW_OVERFLOW: + return "Bandwidth exceeds the maximum limit"; + case ESCH_FIXME: + return "FIXME, to be resolved"; + default: + return "Unknown"; + } +} + static int is_fs_or_ls(enum usb_device_speed speed) { return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW; @@ -375,7 +400,6 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } - sch_ep->allocated = used; } static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) @@ -384,19 +408,20 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) u32 num_esit, tmp; int base; int i, j; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; + for (i = 0; i < num_esit; i++) { base = offset + i * sch_ep->esit; - /* - * Compared with hs bus, no matter what ep type, - * the hub will always delay one uframe to send data - */ - for (j = 0; j < sch_ep->cs_count; j++) { + for (j = 0; j < uframes; j++) { tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe; if (tmp > FS_PAYLOAD_MAX) - return -ERANGE; + return -ESCH_BW_OVERFLOW; } } @@ -406,15 +431,11 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) static int check_sch_tt(struct usb_device *udev, struct mu3h_sch_ep_info *sch_ep, u32 offset) { - struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 extra_cs_count; - u32 fs_budget_start; u32 start_ss, last_ss; u32 start_cs, last_cs; - int i; start_ss = offset % 8; - fs_budget_start = (start_ss + 1) % 8; if (sch_ep->ep_type == ISOC_OUT_EP) { last_ss = start_ss + sch_ep->cs_count - 1; @@ -424,11 +445,7 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (!(start_ss == 7 || last_ss < 6)) - return -ERANGE; - - for (i = 
0; i < sch_ep->cs_count; i++) - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; + return -ESCH_SS_Y6; } else { u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); @@ -438,29 +455,24 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (start_ss == 6) - return -ERANGE; + return -ESCH_SS_Y6; /* one uframe for ss + one uframe for idle */ start_cs = (start_ss + 2) % 8; last_cs = start_cs + cs_count - 1; if (last_cs > 7) - return -ERANGE; + return -ESCH_CS_OVERFLOW; if (sch_ep->ep_type == ISOC_IN_EP) extra_cs_count = (last_cs == 7) ? 1 : 2; else /* ep_type : INTR IN / INTR OUT */ - extra_cs_count = (fs_budget_start == 6) ? 1 : 2; + extra_cs_count = 1; cs_count += extra_cs_count; if (cs_count > 7) cs_count = 7; /* HW limit */ - for (i = 0; i < cs_count + 2; i++) { - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; - } - sch_ep->cs_count = cs_count; /* one for ss, the other for idle */ sch_ep->num_budget_microframes = cs_count + 2; @@ -482,28 +494,24 @@ static void update_sch_tt(struct usb_device *udev, struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 base, num_esit; int bw_updated; - int bits; int i, j; + int offset = sch_ep->offset; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; - bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1; if (used) bw_updated = sch_ep->bw_cost_per_microframe; else bw_updated = -sch_ep->bw_cost_per_microframe; - for (i = 0; i < num_esit; i++) { - base = sch_ep->offset + i * sch_ep->esit; + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; - for (j = 0; j < bits; j++) { - if (used) - set_bit(base + j, tt->ss_bit_map); - else - clear_bit(base + j, tt->ss_bit_map); - } + for (i = 0; i < num_esit; i++) { + base = offset + i * sch_ep->esit; - for (j = 0; j < sch_ep->cs_count; j++) + for (j = 0; j < uframes; j++) tt->fs_bus_bw[base + j] += bw_updated; } @@ -513,21 +521,46 @@ static void update_sch_tt(struct usb_device *udev, list_del(&sch_ep->tt_endpoint); } +static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, + struct mu3h_sch_ep_info *sch_ep, bool loaded) +{ + if (sch_ep->sch_tt) + update_sch_tt(udev, sch_ep, loaded); + + /* update bus bandwidth info */ + update_bus_bw(sch_bw, sch_ep, loaded); + sch_ep->allocated = loaded; + + return 0; +} + +static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep) +{ + u32 boundary = sch_ep->esit; + + if (sch_ep->sch_tt) { /* LS/FS with TT */ + /* tune for CS */ + if (sch_ep->ep_type != ISOC_OUT_EP) + boundary++; + else if (boundary > 1) /* normally esit >= 8 for FS/LS */ + boundary--; + } + + return boundary; +} + static int check_sch_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { u32 offset; - u32 esit; u32 min_bw; u32 min_index; u32 worst_bw; u32 bw_boundary; + u32 esit_boundary; u32 min_num_budget; u32 min_cs_count; - bool tt_offset_ok = false; - int ret; - - esit = sch_ep->esit; + int ret = 0; /* * Search through all possible schedule microframes. 
@@ -537,16 +570,15 @@ static int check_sch_bw(struct usb_device *udev, min_index = 0; min_cs_count = sch_ep->cs_count; min_num_budget = sch_ep->num_budget_microframes; - for (offset = 0; offset < esit; offset++) { - if (is_fs_or_ls(udev->speed)) { + esit_boundary = get_esit_boundary(sch_ep); + for (offset = 0; offset < sch_ep->esit; offset++) { + if (sch_ep->sch_tt) { ret = check_sch_tt(udev, sch_ep, offset); if (ret) continue; - else - tt_offset_ok = true; } - if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit) + if ((offset + sch_ep->num_budget_microframes) > esit_boundary) break; worst_bw = get_max_bw(sch_bw, sch_ep, offset); @@ -569,35 +601,21 @@ static int check_sch_bw(struct usb_device *udev, /* check bandwidth */ if (min_bw > bw_boundary) - return -ERANGE; + return ret ? ret : -ESCH_BW_OVERFLOW; sch_ep->offset = min_index; sch_ep->cs_count = min_cs_count; sch_ep->num_budget_microframes = min_num_budget; - if (is_fs_or_ls(udev->speed)) { - /* all offset for tt is not ok*/ - if (!tt_offset_ok) - return -ERANGE; - - update_sch_tt(udev, sch_ep, 1); - } - - /* update bus bandwidth info */ - update_bus_bw(sch_bw, sch_ep, 1); - - return 0; + return load_ep_bw(udev, sch_bw, sch_ep, true); } static void destroy_sch_ep(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { /* only release ep bw check passed by check_sch_bw() */ - if (sch_ep->allocated) { - update_bus_bw(sch_bw, sch_ep, 0); - if (sch_ep->sch_tt) - update_sch_tt(udev, sch_ep, 0); - } + if (sch_ep->allocated) + load_ep_bw(udev, sch_bw, sch_ep, false); if (sch_ep->sch_tt) drop_tt(udev); @@ -760,7 +778,8 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ret = check_sch_bw(udev, sch_bw, sch_ep); if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); + xhci_err(xhci, "Not enough bandwidth! 
(%s)\n", + sch_error_string(-ret)); return -ENOSPC; } } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 2fc0568ba054e1c98cbdb5dda9294dae79415a2a..3e2c607b5d64c7dc32e1247fbbf638a462e5c8e5 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -20,14 +20,12 @@ #define XHCI_MTK_MAX_ESIT 64 /** - * @ss_bit_map: used to avoid start split microframes overlay * @fs_bus_bw: array to keep track of bandwidth already used for FS * @ep_list: Endpoints using this TT * @usb_tt: usb TT related * @tt_port: TT port number */ struct mu3h_sch_tt { - DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT); u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; struct usb_tt *usb_tt; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 211e03a204072f8b93d7756041bfeda20b54797f..eea3dd18a044c38338ea3206435573e0e405d4b0 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM060K 0x030b #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_RM500K 0x7001 @@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */ + .driver_info = ZLP }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index acdef6fbb85e077450f0eba0ebd8aad2d8d9e7e7..80daa70e288b091c193bf3ccf2a5818374bf355e 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -83,8 +83,6 @@ enum { /* * Input Output Manager (IOM) PORT STATUS */ -#define IOM_PORT_STATUS_OFFSET 0x560 - #define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6) #define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6 #define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03 @@ -144,6 +142,7 @@ struct pmc_usb { struct pmc_usb_port *port; struct acpi_device *iom_adev; void __iomem *iom_base; + u32 iom_port_status_offset; }; static void update_port_status(struct pmc_usb_port *port) @@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port) /* SoC expects the USB Type-C port 
numbers to start with 0 */ port_num = port->usb3_port - 1; - port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET + + port->iom_status = readl(port->pmc->iom_base + + port->pmc->iom_port_status_offset + port_num * sizeof(u32)); } @@ -541,19 +541,42 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index, static int is_memory(struct acpi_resource *res, void *data) { - struct resource r; + struct resource_win win = {}; + struct resource *r = &win.res; - return !acpi_dev_resource_memory(res, &r); + return !(acpi_dev_resource_memory(res, r) || + acpi_dev_resource_address_space(res, &win)); } +/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ +static const struct acpi_device_id iom_acpi_ids[] = { + /* TigerLake */ + { "INTC1072", 0x560, }, + + /* AlderLake */ + { "INTC1079", 0x160, }, + + /* Meteor Lake */ + { "INTC107A", 0x160, }, + {} +}; + static int pmc_usb_probe_iom(struct pmc_usb *pmc) { struct list_head resource_list; struct resource_entry *rentry; - struct acpi_device *adev; + static const struct acpi_device_id *dev_id; + struct acpi_device *adev = NULL; int ret; - adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1); + for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) { + if (acpi_dev_present(dev_id->id, NULL, -1)) { + pmc->iom_port_status_offset = (u32)dev_id->driver_data; + adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1); + break; + } + } + if (!adev) return -ENODEV; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index fbd438e9b9b033dbecd810793be95dc5e85ad5c4..cd5c8b49d763678e760d163c5c6c291c4b2563f7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -98,6 +98,12 @@ struct vfio_dma { unsigned long *bitmap; }; +struct vfio_batch { + struct page **pages; /* for pin_user_pages_remote */ + struct page *fallback_page; /* if pages alloc fails */ + int capacity; /* length of pages array */ +}; + struct vfio_group { struct iommu_group *iommu_group; struct list_head next; @@ -428,6 +434,31 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void vfio_batch_init(struct vfio_batch *batch) +{ + if (unlikely(disable_hugepages)) + goto fallback; + + batch->pages = (struct page **) __get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = VFIO_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void vfio_batch_fini(struct vfio_batch *batch) +{ + if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, bool write_fault) @@ -464,10 +495,14 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } -static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, - int prot, unsigned long *pfn) +/* + * Returns the positive number of pfns successfully obtained or a negative + * error code. 
+ */ +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) { - struct page *page[1]; struct vm_area_struct *vma; unsigned int flags = 0; int ret; @@ -476,11 +511,22 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, - page, NULL, NULL); - if (ret == 1) { - *pfn = page_to_pfn(page[0]); - ret = 0; + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, + pages, NULL, NULL); + if (ret > 0) { + int i; + + /* + * The zero page is always resident, we don't need to pin it + * and it falls into our invalid/reserved test so we don't + * unpin in put_pfn(). Unpin all zero pages in the batch here. + */ + for (i = 0 ; i < ret; i++) { + if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) + unpin_user_page(pages[i]); + } + + *pfn = page_to_pfn(pages[0]); goto done; } @@ -494,8 +540,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, if (ret == -EAGAIN) goto retry; - if (!ret && !is_invalid_reserved_pfn(*pfn)) - ret = -EFAULT; + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } } done: mmap_read_unlock(mm); @@ -509,7 +559,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - unsigned long limit) + unsigned long limit, struct vfio_batch *batch) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -520,8 +570,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, if (!current->mm) return -ENODEV; - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base, + batch->pages); + if (ret < 0) return ret; pinned++; @@ -547,8 +598,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn, + batch->pages); + if (ret < 0) break; if (pfn != *pfn_base + pinned || @@ -574,7 +626,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: - if (ret) { + if (ret < 0) { if (!rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) put_pfn(pfn, dma->prot); @@ -610,6 +662,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { + struct page *pages[1]; struct mm_struct *mm; int ret; @@ -617,8 +670,8 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV; - ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); - if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); + if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); @@ -1263,15 +1316,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, { dma_addr_t iova = 
dma->iova;
 	unsigned long vaddr = dma->vaddr;
+	struct vfio_batch batch;
 	size_t size = map_size;
 	long npage;
 	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	int ret = 0;
 
+	vfio_batch_init(&batch);
+
 	while (size) {
 		/* Pin a contiguous chunk of memory */
 		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
-					      size >> PAGE_SHIFT, &pfn, limit);
+					      size >> PAGE_SHIFT, &pfn, limit,
+					      &batch);
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
@@ -1291,6 +1348,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		dma->size += npage << PAGE_SHIFT;
 	}
 
+	vfio_batch_fini(&batch);
 	dma->iommu_mapped = true;
 
 	if (ret)
@@ -1449,6 +1507,7 @@ static int vfio_bus_type(struct device *dev, void *data)
 static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			     struct vfio_domain *domain)
 {
+	struct vfio_batch batch;
 	struct vfio_domain *d = NULL;
 	struct rb_node *n;
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -1459,6 +1518,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	d = list_first_entry(&iommu->domain_list,
 			     struct vfio_domain, next);
 
+	vfio_batch_init(&batch);
+
 	n = rb_first(&iommu->dma_list);
 
 	for (; n; n = rb_next(n)) {
@@ -1506,7 +1567,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 
 			npage = vfio_pin_pages_remote(dma, vaddr,
 						      n >> PAGE_SHIFT,
-						      &pfn, limit);
+						      &pfn, limit,
+						      &batch);
 			if (npage <= 0) {
 				WARN_ON(!npage);
 				ret = (int)npage;
@@ -1539,6 +1601,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 		dma->iommu_mapped = true;
 	}
 
+	vfio_batch_fini(&batch);
 	return 0;
 
 unwind:
@@ -1579,6 +1642,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 		}
 	}
 
+	vfio_batch_fini(&batch);
 	return ret;
 }
 
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index c667c63f2cb003cea791a158627b4bf76ca988c8..fa8aefe6b7ec312170d18e8a3767a77df4b6a19e 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -358,19 +358,36 @@ xfs_dinode_verify_fork(
 	int			whichfork)
 {
 	uint32_t		di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+	mode_t			mode = be16_to_cpu(dip->di_mode);
+	uint32_t		fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
+	uint32_t		fork_format = XFS_DFORK_FORMAT(dip, whichfork);
 
-	switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+	/*
+	 * For fork types that can contain local data, check that the fork
+	 * format matches the size of local data contained within the fork.
+	 *
+	 * For all types, check that when the size says the should be in extent
+	 * or btree format, the inode isn't claiming it is in local format.
+	 */
+	if (whichfork == XFS_DATA_FORK) {
+		if (S_ISDIR(mode) || S_ISLNK(mode)) {
+			if (be64_to_cpu(dip->di_size) <= fork_size &&
+			    fork_format != XFS_DINODE_FMT_LOCAL)
+				return __this_address;
+		}
+
+		if (be64_to_cpu(dip->di_size) > fork_size &&
+		    fork_format == XFS_DINODE_FMT_LOCAL)
+			return __this_address;
+	}
+
+	switch (fork_format) {
 	case XFS_DINODE_FMT_LOCAL:
 		/*
-		 * no local regular files yet
+		 * No local regular files yet.
 		 */
-		if (whichfork == XFS_DATA_FORK) {
-			if (S_ISREG(be16_to_cpu(dip->di_mode)))
-				return __this_address;
-			if (be64_to_cpu(dip->di_size) >
-					XFS_DFORK_SIZE(dip, mp, whichfork))
-				return __this_address;
-		}
+		if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
+			return __this_address;
 		if (di_nextents)
 			return __this_address;
 		break;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c2818c318ce2d5c34b9c495b3f9031b47f62c3a9..b3ac7c98fe8cdd6aa41dffff2591ee839b7f672a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2669,14 +2669,13 @@ xfs_ifree_cluster(
 }
 
 /*
- * This is called to return an inode to the inode free list.
- * The inode should already be truncated to 0 length and have
- * no pages associated with it. This routine also assumes that
- * the inode is already a part of the transaction.
+ * This is called to return an inode to the inode free list. The inode should
+ * already be truncated to 0 length and have no pages associated with it. This
+ * routine also assumes that the inode is already a part of the transaction.
  *
- * The on-disk copy of the inode will have been added to the list
- * of unlinked inodes in the AGI. We need to remove the inode from
- * that list atomically with respect to freeing it here.
+ * The on-disk copy of the inode will have been added to the list of unlinked
+ * inodes in the AGI. We need to remove the inode from that list atomically with
+ * respect to freeing it here.
  */
 int
 xfs_ifree(
@@ -2694,13 +2693,16 @@ xfs_ifree(
 	ASSERT(ip->i_d.di_nblocks == 0);
 
 	/*
-	 * Pull the on-disk inode from the AGI unlinked list.
+	 * Free the inode first so that we guarantee that the AGI lock is going
+	 * to be taken before we remove the inode from the unlinked list. This
+	 * makes the AGI lock -> unlinked list modification order the same as
+	 * used in O_TMPFILE creation.
 	 */
-	error = xfs_iunlink_remove(tp, ip);
+	error = xfs_difree(tp, ip->i_ino, &xic);
 	if (error)
 		return error;
 
-	error = xfs_difree(tp, ip->i_ino, &xic);
+	error = xfs_iunlink_remove(tp, ip);
 	if (error)
 		return error;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 94871f12e53622e7900dc8db48dcc4cb69b6b5e7..896e563e2c181a03a0c36a774f3ee83e4abe36ad 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1489,6 +1489,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 					    unsigned long start, unsigned long end);
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
 #else
diff --git a/mm/slub.c b/mm/slub.c
index c86152366e730d36cd7caff808a44b40218e5b9d..a422f9471608e3e30291df474316bc717fa35156 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5562,7 +5562,8 @@ static char *create_unique_id(struct kmem_cache *s)
 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
 	char *p = name;
 
-	BUG_ON(!name);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
 
 	*p++ = ':';
 	/*
@@ -5620,6 +5621,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 		 * for the symlinks.
 		 */
 		name = create_unique_id(s);
+		if (IS_ERR(name))
+			return PTR_ERR(name);
 	}
 
 	s->kobj.kset = kset;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b83dc9bf0a5dd00c5cf48f66da86593c718e7b89..78fd9122b70c7577a5c4b02db5142acce925565e 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
 				return ret;
 			if (ret == 0)
 				break;
-			dataoff += *matchoff;
+			dataoff = *matchoff;
 		}
 		*in_header = 0;
 	}
@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
 			break;
 		if (ret == 0)
 			return ret;
-		dataoff += *matchoff;
+		dataoff = *matchoff;
 	}
 
 	if (in_header)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 600ea241ead79630bdfcd91da02be4259e401140..79b8d4258fd3b3f1824c6c6f21a0051f5e62d064 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2584,6 +2584,8 @@ static const struct pci_device_id azx_ids[] = {
 	/* 5 Series/3400 */
 	{ PCI_DEVICE(0x8086, 0x3b56),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+	{ PCI_DEVICE(0x8086, 0x3b57),
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
 	/* Poulsbo */
 	{ PCI_DEVICE(0x8086, 0x811b),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 71e11481ba41c553d4c6860df329733da9678c2d..7551cdf3b4529585e18b3b0ef741c3f157cb1a82 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3839,6 +3839,7 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
 	if (err)
 		return err;
 
+	codec->depop_delay = 10;
 	codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
 	spec = codec->spec;
 	spec->chmap.ops.chmap_cea_alloc_validate_get_type =
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 78f4f684a3c72f186c62235c1e3ed2361bccd1d7..574fe798d5125383a2af8c2db23da506d5f30910 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6824,6 +6824,8 @@ enum {
 	ALC294_FIXUP_ASUS_GU502_HP,
 	ALC294_FIXUP_ASUS_GU502_PINS,
 	ALC294_FIXUP_ASUS_GU502_VERBS,
+	ALC294_FIXUP_ASUS_G513_PINS,
+	ALC285_FIXUP_ASUS_G533Z_PINS,
 	ALC285_FIXUP_HP_GPIO_LED,
 	ALC285_FIXUP_HP_MUTE_LED,
 	ALC236_FIXUP_HP_GPIO_LED,
@@ -8149,6 +8151,24 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC294_FIXUP_ASUS_GU502_HP] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc294_fixup_gu502_hp,
+	},
+	[ALC294_FIXUP_ASUS_G513_PINS] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+				{ 0x19, 0x03a11050 }, /* front HP mic */
+				{ 0x1a, 0x03a11c30 }, /* rear external mic */
+				{ 0x21, 0x03211420 }, /* front HP out */
+				{ }
+		},
+	},
+	[ALC285_FIXUP_ASUS_G533Z_PINS] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x14, 0x90170120 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC294_FIXUP_ASUS_G513_PINS,
 	},
 	[ALC294_FIXUP_ASUS_COEF_1B] = {
 		.type = HDA_FIXUP_VERBS,
@@ -8754,6 +8774,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+	SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
 	SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
@@ -8769,6 +8790,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
+	SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8912,10 +8934,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
-	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -8930,14 +8953,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+	SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+	SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
-	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
-	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9134,6 +9159,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+	SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
 	SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 578235291e92e0c358d93e6f6ca074e880424055..c4cce817a45223c0fdb933a1adde69ff0b7b087e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
@@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+	kvm_arch_guest_memory_reclaimed(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
@@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * - kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);