Compare commits

30 Commits

Author SHA1 Message Date
Linus Torvalds
6fa9041b71 nfsd-6.18 fixes:

Merge tag 'nfsd-6.18-3' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd fixes from Chuck Lever:
 "Address recently reported issues or issues found at the recent NFS
  bake-a-thon held in Raleigh, NC.

  Issues reported with v6.18-rc:
   - Address a kernel build issue
   - Reorder SEQUENCE processing to avoid spurious NFS4ERR_SEQ_MISORDERED

  Issues that need expedient stable backports:
   - Close a refcount leak exposure
   - Report support for NFSv4.2 CLONE correctly
   - Fix oops during COPY_NOTIFY processing
   - Prevent rare crash after XDR encoding failure
   - Prevent crash due to confused or malicious NFSv4.1 client"

* tag 'nfsd-6.18-3' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux:
  Revert "SUNRPC: Make RPCSEC_GSS_KRB5 select CRYPTO instead of depending on it"
  nfsd: ensure SEQUENCE replay sends a valid reply.
  NFSD: Never cache a COMPOUND when the SEQUENCE operation fails
  NFSD: Skip close replay processing if XDR encoding fails
  NFSD: free copynotify stateid in nfs4_free_ol_stateid()
  nfsd: add missing FATTR4_WORD2_CLONE_BLKSIZE from supported attributes
  nfsd: fix refcount leak in nfsd_set_fh_dentry()
2025-11-12 18:41:01 -08:00
Linus Torvalds
92385a075a dma-mapping fixes for Linux 6.18

Merge tag 'dma-mapping-6.18-2025-11-12' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

 - two minor fixes for DMA API infrastructure: restoring proper
   structure padding used in benchmark tests (Qinxin Xia) and global
   DMA_BIT_MASK macro rework to make it a bit more clang friendly (James
   Clark)

* tag 'dma-mapping-6.18-2025-11-12' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  dma-mapping: Allow use of DMA_BIT_MASK(64) in global scope
  dma-mapping: benchmark: Restore padding to ensure uABI remained consistent
2025-11-12 18:31:22 -08:00
Linus Torvalds
e927c520e1 LoongArch fixes for v6.18-rc6

Merge tag 'loongarch-fixes-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:

 - Fix a Rust build error

 - Fix exception/interrupt, memory management, perf event, hardware
   breakpoint, kexec and KVM bugs

* tag 'loongarch-fixes-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Fix max supported vCPUs set with EIOINTC
  LoongArch: KVM: Skip PMU checking on vCPU context switch
  LoongArch: KVM: Restore guest PMU if it is enabled
  LoongArch: KVM: Add delay until timer interrupt injected
  LoongArch: KVM: Set page with write attribute if dirty track disabled
  LoongArch: kexec: Print out debugging message if required
  LoongArch: kexec: Initialize the kexec_buf structure
  LoongArch: Use correct accessor to read FWPC/MWPC
  LoongArch: Refine the init_hw_perf_events() function
  LoongArch: Remove __GFP_HIGHMEM masking in pud_alloc_one()
  LoongArch: Let {pte,pmd}_modify() record the status of _PAGE_DIRTY
  LoongArch: Consolidate max_pfn & max_low_pfn calculation
  LoongArch: Consolidate early_ioremap()/ioremap_prot()
  LoongArch: Use physical addresses for CSR_MERRENTRY/CSR_TLBRENTRY
  LoongArch: Clarify 3 MSG interrupt features
  rust: Add -fno-isolate-erroneous-paths-dereference to bindgen_skip_c_flags
2025-11-12 18:21:30 -08:00
Linus Torvalds
89ee862a4d alpha: Fixes for v6.18

Merge tag 'alpha-fixes-v6.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha

Pull alpha fix from Matt Turner:
 "Add Magnus as a maintainer of the alpha port"

* tag 'alpha-fixes-v6.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
  MAINTAINERS: Add Magnus Lindholm as maintainer for alpha port
2025-11-12 18:18:12 -08:00
Magnus Lindholm
d58041d2c6 MAINTAINERS: Add Magnus Lindholm as maintainer for alpha port
Acked-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Acked-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Magnus Lindholm <linmag7@gmail.com>
Signed-off-by: Matt Turner <mattst88@gmail.com>
2025-11-11 20:52:04 -05:00
Chuck Lever
324be6dcbf Revert "SUNRPC: Make RPCSEC_GSS_KRB5 select CRYPTO instead of depending on it"
Geert reports:
> This is now commit d8e97cc476e33037 ("SUNRPC: Make RPCSEC_GSS_KRB5
> select CRYPTO instead of depending on it") in v6.18-rc1.
> As RPCSEC_GSS_KRB5 defaults to "y", CRYPTO is now auto-enabled in
> defconfigs that didn't enable it before.

Revert while we work out a proper solution and then test it.

Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Closes: https://lore.kernel.org/linux-nfs/b97cea29-4ab7-4fb6-85ba-83f9830e524f@kernel.org/T/#t
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-10 09:31:52 -05:00
NeilBrown
1cff14b7fc nfsd: ensure SEQUENCE replay sends a valid reply.
nfsd4_enc_sequence_replay() uses nfsd4_encode_operation() to encode a
new SEQUENCE reply when replaying a request from the slot cache - only
ops after the SEQUENCE are replayed from the cache in ->sl_data.

However it does this in nfsd4_replay_cache_entry() which is called
*before* nfsd4_sequence() has filled in reply fields.

This means that in the replayed SEQUENCE reply:
 maxslots will be whatever the client sent
 target_maxslots will be -1 (assuming init to zero, and
      nfsd4_encode_sequence() subtracts 1)
 status_flags will be zero

The incorrect maxslots value, in particular, can cause the client to
think the slot table has been reduced in size, so it discards its
knowledge of the current sequence numbers of the later slots, even
though the server has not discarded those slots.  When the client later
wants to use one of those slots, it can get NFS4ERR_SEQ_MISORDERED from
the server.

This patch moves the setup of the reply into a new helper function and
calls it *before* nfsd4_replay_cache_entry() is called.  Only one of the
updated fields, maxslots, is used after this point, so the
nfsd4_sequence struct has been extended to carry separate maxslots for
the request and the response.

Reported-by: Olga Kornievskaia <okorniev@redhat.com>
Closes: https://lore.kernel.org/linux-nfs/20251010194449.10281-1-okorniev@redhat.com/
Tested-by: Olga Kornievskaia <okorniev@redhat.com>
Signed-off-by: NeilBrown <neil@brown.name>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-10 09:31:52 -05:00
Chuck Lever
c96573c0d7 NFSD: Never cache a COMPOUND when the SEQUENCE operation fails
RFC 8881 normatively mandates that when the initial SEQUENCE operation
in a COMPOUND fails, the COMPOUND must not modify the slot's replay
cache.

nfsd4_cache_this() doesn't prevent such caching. So when SEQUENCE
fails, cstate.data_offset is not set, allowing
read_bytes_from_xdr_buf() to access uninitialized memory.

Reported-by: rtm@csail.mit.edu
Closes: https://lore.kernel.org/linux-nfs/c3628d57-94ae-48cf-8c9e-49087a28cec9@oracle.com/T/#t
Fixes: 468de9e54a90 ("nfsd41: expand solo sequence check")
Reviewed-by: NeilBrown <neil@brown.name>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-10 09:31:52 -05:00
Chuck Lever
ff8141e49c NFSD: Skip close replay processing if XDR encoding fails
The replay logic added by commit 9411b1d4c7df ("nfsd4: cleanup
handling of nfsv4.0 closed stateid's") cannot be done if encoding
failed due to a short send buffer; there's no guarantee that the
operation encoder has actually encoded the data that is being copied
to the replay cache.

Reported-by: rtm@csail.mit.edu
Closes: https://lore.kernel.org/linux-nfs/c3628d57-94ae-48cf-8c9e-49087a28cec9@oracle.com/T/#t
Fixes: 9411b1d4c7df ("nfsd4: cleanup handling of nfsv4.0 closed stateid's")
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: NeilBrown <neil@brown.name>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-10 09:31:52 -05:00
Olga Kornievskaia
4aa17144d5 NFSD: free copynotify stateid in nfs4_free_ol_stateid()
Typically a copynotify stateid is freed either when its parent stateid
is being closed/freed, or in nfsd4_laundromat if the stateid hasn't
been used in a lease period.

However, consider the case where the server gets an OPEN (which creates
a parent stateid), followed by a COPY_NOTIFY using that stateid,
followed by a client reboot.  The new client instance, while doing
CREATE_SESSION, force-expires the previous state of this client.  That
leads to the open state being freed through release_openowner->
nfs4_free_ol_stateid(), which finds that it still has a copynotify
stateid associated with it.  We currently only print a warning, and the
following is triggered:

WARNING: CPU: 1 PID: 8858 at fs/nfsd/nfs4state.c:1550 nfs4_free_ol_stateid+0xb0/0x100 [nfsd]

This patch, instead, frees the associated copynotify stateid here.

If the parent stateid is freed without freeing the copynotify stateids
associated with it, the result is list corruption when the laundromat
later frees the copynotify state:

[ 1626.839430] Internal error: Oops - BUG: 00000000f2000800 [#1]  SMP
[ 1626.842828] Modules linked in: nfnetlink_queue nfnetlink_log bluetooth cfg80211 rpcrdma rdma_cm iw_cm ib_cm ib_core nfsd nfs_acl lockd grace nfs_localio ext4 crc16 mbcache jbd2 overlay uinput snd_seq_dummy snd_hrtimer qrtr rfkill vfat fat uvcvideo snd_hda_codec_generic videobuf2_vmalloc videobuf2_memops snd_hda_intel uvc snd_intel_dspcfg videobuf2_v4l2 videobuf2_common snd_hda_codec snd_hda_core videodev snd_hwdep snd_seq mc snd_seq_device snd_pcm snd_timer snd soundcore sg loop auth_rpcgss vsock_loopback vmw_vsock_virtio_transport_common vmw_vsock_vmci_transport vmw_vmci vsock xfs 8021q garp stp llc mrp nvme ghash_ce e1000e nvme_core sr_mod nvme_keyring nvme_auth cdrom vmwgfx drm_ttm_helper ttm sunrpc dm_mirror dm_region_hash dm_log iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi fuse dm_multipath dm_mod nfnetlink
[ 1626.855594] CPU: 2 UID: 0 PID: 199 Comm: kworker/u24:33 Kdump: loaded Tainted: G    B   W           6.17.0-rc7+ #22 PREEMPT(voluntary)
[ 1626.857075] Tainted: [B]=BAD_PAGE, [W]=WARN
[ 1626.857573] Hardware name: VMware, Inc. VMware20,1/VBSA, BIOS VMW201.00V.24006586.BA64.2406042154 06/04/2024
[ 1626.858724] Workqueue: nfsd4 laundromat_main [nfsd]
[ 1626.859304] pstate: 61400005 (nZCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
[ 1626.860010] pc : __list_del_entry_valid_or_report+0x148/0x200
[ 1626.860601] lr : __list_del_entry_valid_or_report+0x148/0x200
[ 1626.861182] sp : ffff8000881d7a40
[ 1626.861521] x29: ffff8000881d7a40 x28: 0000000000000018 x27: ffff0000c2a98200
[ 1626.862260] x26: 0000000000000600 x25: 0000000000000000 x24: ffff8000881d7b20
[ 1626.862986] x23: ffff0000c2a981e8 x22: 1fffe00012410e7d x21: ffff0000920873e8
[ 1626.863701] x20: ffff0000920873e8 x19: ffff000086f22998 x18: 0000000000000000
[ 1626.864421] x17: 20747562202c3839 x16: 3932326636383030 x15: 3030666666662065
[ 1626.865092] x14: 6220646c756f6873 x13: 0000000000000001 x12: ffff60004fd9e4a3
[ 1626.865713] x11: 1fffe0004fd9e4a2 x10: ffff60004fd9e4a2 x9 : dfff800000000000
[ 1626.866320] x8 : 00009fffb0261b5e x7 : ffff00027ecf2513 x6 : 0000000000000001
[ 1626.866938] x5 : ffff00027ecf2510 x4 : ffff60004fd9e4a3 x3 : 0000000000000000
[ 1626.867553] x2 : 0000000000000000 x1 : ffff000096069640 x0 : 000000000000006d
[ 1626.868167] Call trace:
[ 1626.868382]  __list_del_entry_valid_or_report+0x148/0x200 (P)
[ 1626.868876]  _free_cpntf_state_locked+0xd0/0x268 [nfsd]
[ 1626.869368]  nfs4_laundromat+0x6f8/0x1058 [nfsd]
[ 1626.869813]  laundromat_main+0x24/0x60 [nfsd]
[ 1626.870231]  process_one_work+0x584/0x1050
[ 1626.870595]  worker_thread+0x4c4/0xc60
[ 1626.870893]  kthread+0x2f8/0x398
[ 1626.871146]  ret_from_fork+0x10/0x20
[ 1626.871422] Code: aa1303e1 aa1403e3 910e8000 97bc55d7 (d4210000)
[ 1626.871892] SMP: stopping secondary CPUs

Reported-by: rtm@csail.mit.edu
Closes: https://lore.kernel.org/linux-nfs/d8f064c1-a26f-4eed-b4f0-1f7f608f415f@oracle.com/T/#t
Fixes: 624322f1adc5 ("NFSD add COPY_NOTIFY operation")
Cc: stable@vger.kernel.org
Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-10 09:31:52 -05:00
Bibo Mao
237e74bfa2 LoongArch: KVM: Fix max supported vCPUs set with EIOINTC
A VM fails to boot with 256 vCPUs. The detailed command is

  qemu-system-loongarch64 -smp 256

and the following error is reported:

  KVM_LOONGARCH_EXTIOI_INIT_NUM_CPU failed: Invalid argument

There is a typo in kvm_eiointc_ctrl_access() when setting the max
supported vCPUs.
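
A standalone sketch (not kernel code) of the off-by-one this refers to,
based on the bounds-check change visible in the diff further down.  The
value of EIOINTC_ROUTE_MAX_VCPUS is assumed to be 256 here purely for
illustration:

  #include <stdio.h>

  #define EIOINTC_ROUTE_MAX_VCPUS 256   /* assumed value, for illustration only */

  /* Old check: a VM configured with exactly 256 vCPUs is rejected. */
  static int check_before_fix(unsigned long val)
  {
          return (val >= EIOINTC_ROUTE_MAX_VCPUS) ? -1 : 0;   /* -1 ~ -EINVAL */
  }

  /* Fixed check: 256 vCPUs is accepted, anything larger is rejected. */
  static int check_after_fix(unsigned long val)
  {
          return (val > EIOINTC_ROUTE_MAX_VCPUS) ? -1 : 0;
  }

  int main(void)
  {
          printf("256 vCPUs, before fix: %d\n", check_before_fix(256)); /* -1: rejected */
          printf("256 vCPUs, after fix:  %d\n", check_after_fix(256));  /*  0: accepted */
          return 0;
  }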

Cc: stable@vger.kernel.org
Fixes: 47256c4c8b1b ("LoongArch: KVM: Avoid copy_*_user() with lock hold in kvm_eiointc_ctrl_access()")
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Bibo Mao
11f340ece4 LoongArch: KVM: Skip PMU checking on vCPU context switch
The VM's PMU hardware is switched on VM exit to the host rather than
when the vCPU context is scheduled off, and the PMU is checked and
restored on return to the VM.  It is therefore not necessary to check
the PMU in the vCPU context sched-in callback, since the request is
already made in the VM-exit entry path or in the VM PMU CSR access
abort routine.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Bibo Mao
5001bcf86e LoongArch: KVM: Restore guest PMU if it is enabled
On LoongArch systems, guest PMU hardware is shared by guest and host,
but the PMU interrupt is separate.  The PMU is passed through to the VM,
and there is a PMU context switch when exiting to the host and returning
to the guest.

There is an optimization that checks whether the PMU is enabled by the
guest; if not, no PMU context needs to be restored before returning to
the guest.  However, if it is enabled, the guest PMU context does need
to be switched on.  Currently the KVM_REQ_PMU notification is set on
vCPU context switch, but it is missed if there is no vCPU context switch
while the PMU is in use by the guest VM, so fix it.

Cc: <stable@vger.kernel.org>
Fixes: f4e40ea9f78f ("LoongArch: KVM: Add PMU support for guest")
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Bibo Mao
d3c9515e4f LoongArch: KVM: Add delay until timer interrupt injected
When the timer fires in oneshot mode, CSR.TVAL stops at the value -1
rather than 0.  However, when the register CSR.TVAL is restored, it
continues to count down rather than stopping there.

The method now is to write 0 to CSR.TVAL, wait for it to count down for
at least one cycle (which is 10ns with a 100MHz timer frequency), and
then restore the timer interrupt status.  Add a delay of 2 cycles here
to ensure that the timer interrupt is injected.

With this patch, the timer selftest case always passes.

Cc: stable@vger.kernel.org
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Bibo Mao
37e9d1a913 LoongArch: KVM: Set page with write attribute if dirty track disabled
With the secondary MMU page table, a read page fault does not set the
page's write attribute even if the page is writable in the master MMU
page table.  This logic is only needed when dirty tracking is enabled,
so the page table entry should be set with _PAGE_WRITE when dirty
tracking is disabled.

This reduces extra page faults on the secondary MMU page table after a
VM finishes migration, when the master MMU page table is ready but the
secondary MMU page table is still fresh.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Qiang Ma
62cda5e54f LoongArch: kexec: Print out debugging message if required
When specifying '-d' for kexec_file_load interface, loaded locations of
kernel/initrd/cmdline etc can be printed out to help debug.

Commit eb7622d908a0 ("kexec_file, riscv: print out debugging message if
required") fixes the same issue on RISC-V.

So, remove kexec_image_info() because the content has been printed out
in generic code.

And on Loongson-3A5000, the printed messages look like below:

 kexec_file: kernel: 00000000d9aad283 kernel_size: 0x2e77f30
 kexec_file(EFI): No LoongArch PE image header.
 kexec_file: Loaded initrd at 0x80000000 bufsz=0x1637cd0 memsz=0x1638000
 kexec_file(ELF): Loaded kernel at 0x9c20000 bufsz=0x27f1800 memsz=0x2950000
 kexec_file: nr_segments = 2
 kexec_file: segment[0]: buf=0x00000000cc3e6c33 bufsz=0x27f1800 mem=0x9c20000 memsz=0x2950000
 kexec_file: segment[1]: buf=0x00000000bb75a541 bufsz=0x1637cd0 mem=0x80000000 memsz=0x1638000
 kexec_file: kexec_file_load: type:0, start:0xb15d000 head:0x18db60002 flags:0x8

Signed-off-by: Qiang Ma <maqianga@uniontech.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Youling Tang
df16b8956c LoongArch: kexec: Initialize the kexec_buf structure
The kexec_buf structure was previously declared without initialization.
Commit bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
added a field that is always read but not consistently populated by all
architectures, so this uninitialized field contains garbage.

This also triggers a UBSAN warning when the uninitialized data is
accessed:

        ------------[ cut here ]------------
        UBSAN: invalid-load in ./include/linux/kexec.h:210:10
        load of value 252 is not a valid value for type '_Bool'

Zero-initializing kexec_buf at declaration ensures all fields are
cleanly set, preventing future instances of uninitialized memory being
used.
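
A minimal standalone sketch (not kernel code) of the idea; the struct
and field names below are hypothetical stand-ins for kexec_buf and its
later-added flag:

  #include <stdbool.h>
  #include <stdio.h>

  struct kbuf_example {
          void *image;
          unsigned long mem_max;
          bool random;    /* added later; generic code always reads it */
  };

  int main(void)
  {
          /* Without "= {}", .random would be indeterminate stack data,
           * and UBSAN reports loads such as "value 252 is not a valid
           * value for type '_Bool'" when that data is read. */
          struct kbuf_example kbuf = {};

          printf("image=%p mem_max=%lu random=%d\n",
                 kbuf.image, kbuf.mem_max, (int)kbuf.random);
          return 0;
  }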

Fixes: bf454ec31add ("kexec_file: allow to place kexec_buf randomly")
Link: https://lore.kernel.org/r/20250827-kbuf_all-v1-2-1df9882bb01a@debian.org
Signed-off-by: Youling Tang <tangyouling@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:07 +08:00
Huacai Chen
eeeeaafa62 LoongArch: Use correct accessor to read FWPC/MWPC
CSR.FWPC and CSR.MWPC are 32-bit registers, so use csr_read32() rather
than csr_read64() to read the values of FWPC/MWPC.

Cc: stable@vger.kernel.org
Fixes: edffa33c7bb5a73 ("LoongArch: Add hardware breakpoints/watchpoints support")
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Tiezhu Yang
4c8a7c9827 LoongArch: Refine the init_hw_perf_events() function
(1) Use the existing CPUCFG6_PMNUM_SHIFT macro definition instead of
the magic value 4 to get the PMU number.

(2) Detect the value of PMU bits via the CPUCFG instruction according
to the ISA manual instead of hard-coding it as 64, because the value may
differ between micro-architectures.

Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Vishal Moola (Oracle)
17f838512a LoongArch: Remove __GFP_HIGHMEM masking in pud_alloc_one()
Remove the unnecessary __GFP_HIGHMEM masking in pud_alloc_one(), which
was introduced with commit 382739797f79ec2 ("loongarch: convert various
functions to use ptdescs"). GFP_KERNEL doesn't contain __GFP_HIGHMEM.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Tianyang Zhang
a073d637c8 LoongArch: Let {pte,pmd}_modify() record the status of _PAGE_DIRTY
If the PTE/PMD is dirty with _PAGE_DIRTY but without _PAGE_MODIFIED,
then after {pte,pmd}_modify() we lose _PAGE_DIRTY, {pte,pmd}_dirty()
returns false, and this leads to data loss.  This can happen in certain
scenarios, such as when the hardware PTW doesn't set _PAGE_MODIFIED
automatically, so we need _PAGE_MODIFIED to record the dirty status
(_PAGE_DIRTY).

The new modification involves checking whether the original PTE/PMD has
the _PAGE_DIRTY flag. If it exists, the _PAGE_MODIFIED bit is also set,
ensuring that the {pte,pmd}_dirty() interface can always return accurate
information.

Cc: stable@vger.kernel.org
Co-developed-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
2025-11-10 08:37:06 +08:00
Huacai Chen
ce5ad03e45 LoongArch: Consolidate max_pfn & max_low_pfn calculation
There are now 5 places which calculate max_pfn & max_low_pfn:
1. in fdt_setup() for FDT systems;
2. in memblock_init() for ACPI systems;
3. in init_numa_memory() for NUMA systems;
4. in arch_mem_init() to recalculate for "mem=" cmdline;
5. in paging_init() to recalculate for NUMA systems.

Since memblock_init() is called for both ACPI and FDT systems, moving
the calculation out of the for_each_efi_memory_desc() loop eliminates
the first case.  The last case is very questionable (it may be derived
from the MIPS/Loongson code) and breaks the "mem=" cmdline, so it should
be removed.  The NUMA version of paging_init() can then also be
eliminated.

After consolidation there are 3 places of calculation:
1. in memblock_init() for both ACPI and FDT systems;
2. in init_numa_memory() to recalculate for NUMA systems;
3. in arch_mem_init() to recalculate for the "mem=" cmdline.

For all cases the calculation is:
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);

Cc: stable@vger.kernel.org
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Huacai Chen
43a9e6a10b LoongArch: Consolidate early_ioremap()/ioremap_prot()
1. Use phys_addr_t instead of u64, which can work for both 32/64 bits.
2. Check whether the input physical address is above TO_PHYS_MASK (and
   return NULL if yes) for the DMW version.

Note: In theory early_ioremap() also needs the TO_PHYS_MASK check, but
the UEFI BIOS passes some DMW virtual addresses.

Cc: stable@vger.kernel.org
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Huacai Chen
4e67526840 LoongArch: Use physical addresses for CSR_MERRENTRY/CSR_TLBRENTRY
We currently use virtual addresses to fill CSR_MERRENTRY/CSR_TLBRENTRY,
but the hardware expects physical addresses.  This works for now because
the bits above PA_BITS (48 bits) are ignored, but explicitly using
physical addresses avoids potential bugs.  So fix it.

Cc: stable@vger.kernel.org
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Huacai Chen
f28abb9f96 LoongArch: Clarify 3 MSG interrupt features
LoongArch's MSG interrupt features are used across multiple subsystems.
Clarify these features to avoid misuse; existing users will be adjusted
if necessary.

MSGINT: Infrastructure, means the CPU core supports message interrupts.
Indicated by CPUCFG1.MSGINT.

AVECINT: AVEC interrupt controller based on MSGINT, means the CPU chip
supports direct message interrupts. Indicated by IOCSR.FEATURES.DMSI.

REDIRECTINT: REDIRECT interrupt controller based on MSGINT and AVECINT,
means the CPU chip supports redirect message interrupts. Indicated by
IOCSR.FEATURES.RMSI.

For example:
Loongson-3A5000/3C5000 doesn't support MSGINT/AVECINT/REDIRECTINT;
Loongson-3A6000 supports MSGINT but doesn't support AVECINT/REDIRECTINT;
Loongson-3C6000 supports MSGINT/AVECINT/REDIRECTINT.

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
Xi Ruoyao
fe4b3a34e9 rust: Add -fno-isolate-erroneous-paths-dereference to bindgen_skip_c_flags
This flag has been used to work around an objtool issue since commit
abb2a5572264 ("LoongArch: Add cflag -fno-isolate-erroneous-paths-dereference"),
but it is then passed to bindgen and causes an error because Clang does
not have this option.

Fixes: abb2a5572264 ("LoongArch: Add cflag -fno-isolate-erroneous-paths-dereference")
Acked-by: Miguel Ojeda <ojeda@kernel.org>
Tested-by: Mingcong Bai <jeffbai@aosc.io>
Signed-off-by: Xi Ruoyao <xry111@xry111.site>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-11-10 08:37:06 +08:00
James Clark
a50f7456f8 dma-mapping: Allow use of DMA_BIT_MASK(64) in global scope
Clang doesn't like that (1ULL<<(64)) overflows when initializing a
global scope variable, even if that part of the ternary isn't used when
n = 64. The same initialization can be done without warnings in function
scopes, and GCC doesn't mind either way.

The build failure that highlighted this was already fixed in a different
way [1], which also has detailed links to the Clang issues. However it's
not going to be long before the same thing happens again, so it's better
to fix the root cause.

Fix it by using GENMASK_ULL() which does exactly the same thing, is much
more readable anyway, and doesn't have a shift that overflows.

[1]: https://lore.kernel.org/all/20250918-mmp-pdma-simplify-dma-addressing-v1-1-5c2be2b85696@riscstar.com/
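
A standalone comparison (not kernel code) of the two forms.  The old
definition is copied from the line being replaced; the GENMASK_ULL()
expansion below is a simplified local stand-in for the kernel's
<linux/bits.h> macro:

  #include <stdio.h>

  #define DMA_BIT_MASK_OLD(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
  #define GENMASK_ULL(h, l)   (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
  #define DMA_BIT_MASK_NEW(n) GENMASK_ULL((n) - 1, 0)

  /* A file-scope initializer like this is where Clang diagnosed the
   * old form's 1ULL << 64 shift; the new form never shifts by 64. */
  static const unsigned long long dma64 = DMA_BIT_MASK_NEW(64);

  int main(void)
  {
          /* Both forms produce the same mask for every valid width. */
          for (int n = 1; n <= 64; n++) {
                  if (DMA_BIT_MASK_OLD(n) != DMA_BIT_MASK_NEW(n))
                          printf("mismatch at n=%d\n", n);
          }
          printf("DMA_BIT_MASK(64) = %#llx\n", dma64);
          return 0;
  }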

Signed-off-by: James Clark <james.clark@linaro.org>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251030-james-fix-dma_bit_mask-v1-1-ad1ce7cfab6e@linaro.org
2025-11-05 13:43:41 +01:00
Olga Kornievskaia
4d3dbc2386 nfsd: add missing FATTR4_WORD2_CLONE_BLKSIZE from supported attributes
RFC 7862 Section 4.1.2 says that if the server supports CLONE it MUST
support the clone_blksize attribute.

Fixes: d6ca7d2643ee ("NFSD: Implement FATTR4_CLONE_BLKSIZE attribute")
Cc: stable@vger.kernel.org
Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-04 11:02:31 -05:00
NeilBrown
8a7348a9ed nfsd: fix refcount leak in nfsd_set_fh_dentry()
nfsd exports a "pseudo root filesystem" which is used by NFSv4 to find
the various exported filesystems using LOOKUP requests from a known root
filehandle.  NFSv3 uses the MOUNT protocol to find those exported
filesystems and so is not given access to the pseudo root filesystem.

If a v3 (or v2) client uses a filehandle from that filesystem,
nfsd_set_fh_dentry() will report an error but still store the export in
"struct svc_fh", even though it also drops the reference (exp_put()).
This means that when fh_put() is called, an extra reference will be
dropped, which can lead to use-after-free and a possible denial of
service.

Normal NFS usage will not provide a pseudo-root filehandle to a v3
client.  This bug can only be triggered by the client synthesising an
incorrect filehandle.

To fix this we move the assignments to the svc_fh later, after all
possible error cases have been detected.

Reported-and-tested-by: tianshuo han <hantianshuo233@gmail.com>
Fixes: ef7f6c4904d0 ("nfsd: move V4ROOT version check to nfsd_set_fh_dentry()")
Signed-off-by: NeilBrown <neil@brown.name>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
2025-11-04 11:02:31 -05:00
Qinxin Xia
23ee8a2563 dma-mapping: benchmark: Restore padding to ensure uABI remained consistent
The padding field in the structure was previously reserved to
maintain a stable interface for potential new fields, ensuring
compatibility with user-space shared data structures.
However, it was accidentally removed by tiantao in a prior commit,
which may lead to incompatibility between user space and the kernel.

This patch reinstates the padding to restore the original structure
layout and preserve compatibility.
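
A standalone sketch (not kernel code) of why the reserved bytes matter
for a structure shared with user space: old binaries and new kernels
must keep agreeing on the structure's size and field offsets.  The
struct below is a hypothetical stand-in, not the real map_benchmark
layout; only the 76-byte reserve mirrors the field restored here:

  #include <stdint.h>

  struct bench_example {
          uint64_t avg_map_ns;
          uint32_t threads;
          uint32_t granule;
          uint8_t  expansion[76];   /* reserved for future fields */
  };

  /* A future revision carves new fields out of the reserved area, so
   * the overall size (and therefore the uABI) never changes. */
  struct bench_example_v2 {
          uint64_t avg_map_ns;
          uint32_t threads;
          uint32_t granule;
          uint32_t new_feature;     /* hypothetical future field */
          uint8_t  expansion[72];   /* shrunk by the same 4 bytes */
  };

  _Static_assert(sizeof(struct bench_example) == sizeof(struct bench_example_v2),
                 "uABI structure size must stay constant across versions");

  int main(void) { return 0; }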

Fixes: 8ddde07a3d28 ("dma-mapping: benchmark: extract a common header file for map_benchmark definition")
Cc: stable@vger.kernel.org
Acked-by: Barry Song <baohua@kernel.org>
Signed-off-by: Qinxin Xia <xiaqinxin@huawei.com>
Reported-by: Barry Song <baohua@kernel.org>
Closes: https://lore.kernel.org/lkml/CAGsJ_4waiZ2+NBJG+SCnbNk+nQ_ZF13_Q5FHJqZyxyJTcEop2A@mail.gmail.com/
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251028120900.2265511-2-xiaqinxin@huawei.com
2025-10-29 09:41:40 +01:00
33 changed files with 120 additions and 111 deletions

View File

@ -915,6 +915,7 @@ F: drivers/staging/media/sunxi/cedrus/
ALPHA PORT
M: Richard Henderson <richard.henderson@linaro.org>
M: Matt Turner <mattst88@gmail.com>
M: Magnus Lindholm <linmag7@gmail.com>
L: linux-alpha@vger.kernel.org
S: Odd Fixes
F: arch/alpha/

View File

@ -67,6 +67,8 @@
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
#define cpu_has_msgint cpu_opt(LOONGARCH_CPU_MSGINT)
#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#define cpu_has_redirectint cpu_opt(LOONGARCH_CPU_REDIRECTINT)
#endif /* __ASM_CPU_FEATURES_H */

View File

@ -101,7 +101,9 @@ enum cpu_type_enum {
#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */
#define CPU_FEATURE_MSGINT 29 /* CPU has MSG interrupt */
#define CPU_FEATURE_AVECINT 30 /* CPU has AVEC interrupt */
#define CPU_FEATURE_REDIRECTINT 31 /* CPU has interrupt remapping */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@ -132,6 +134,8 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
#define LOONGARCH_CPU_MSGINT BIT_ULL(CPU_FEATURE_MSGINT)
#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#define LOONGARCH_CPU_REDIRECTINT BIT_ULL(CPU_FEATURE_REDIRECTINT)
#endif /* _ASM_CPU_H */

View File

@ -134,13 +134,13 @@ static inline void hw_breakpoint_thread_switch(struct task_struct *next)
/* Determine number of BRP registers available. */
static inline int get_num_brps(void)
{
return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
return csr_read32(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
}
/* Determine number of WRP registers available. */
static inline int get_num_wrps(void)
{
return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
return csr_read32(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
}
#endif /* __KERNEL__ */

View File

@ -14,7 +14,7 @@
#include <asm/pgtable-bits.h>
#include <asm/string.h>
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#define early_memremap early_ioremap
@ -25,6 +25,9 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
pgprot_t prot)
{
if (offset > TO_PHYS_MASK)
return NULL;
switch (pgprot_val(prot) & _CACHE_MASK) {
case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);

View File

@ -128,6 +128,7 @@
#define CPUCFG6_PMNUM GENMASK(7, 4)
#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
#define CPUCFG6_PMBITS_SHIFT 8
#define CPUCFG6_UPM BIT(14)
#define LOONGARCH_CPUCFG16 0x10
@ -1137,6 +1138,7 @@
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
#define IOCSRF_AVEC BIT_ULL(15)
#define IOCSRF_REDIRECT BIT_ULL(16)
#define LOONGARCH_IOCSR_VENDOR 0x10

View File

@ -88,7 +88,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (!ptdesc)
return NULL;

View File

@ -424,6 +424,9 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
if (pte_val(pte) & _PAGE_DIRTY)
pte_val(pte) |= _PAGE_MODIFIED;
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
@ -547,9 +550,11 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
return pmd;
if (pmd_val(pmd) & _PAGE_DIRTY)
pmd_val(pmd) |= _PAGE_MODIFIED;
return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)

View File

@ -157,6 +157,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_TLB;
if (config & CPUCFG1_IOCSR)
c->options |= LOONGARCH_CPU_IOCSR;
if (config & CPUCFG1_MSGINT)
c->options |= LOONGARCH_CPU_MSGINT;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@ -331,6 +333,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
c->options |= LOONGARCH_CPU_EIODECODE;
if (config & IOCSRF_AVEC)
c->options |= LOONGARCH_CPU_AVECINT;
if (config & IOCSRF_REDIRECT)
c->options |= LOONGARCH_CPU_REDIRECTINT;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;
}

View File

@ -42,7 +42,7 @@ static void *efi_kexec_load(struct kimage *image,
{
int ret;
unsigned long text_offset, kernel_segment_number;
struct kexec_buf kbuf;
struct kexec_buf kbuf = {};
struct kexec_segment *kernel_segment;
struct loongarch_image_header *h;

View File

@ -59,7 +59,7 @@ static void *elf_kexec_load(struct kimage *image,
int ret;
unsigned long text_offset, kernel_segment_number;
struct elfhdr ehdr;
struct kexec_buf kbuf;
struct kexec_buf kbuf = {};
struct kexec_elf_info elf_info;
struct kexec_segment *kernel_segment;

View File

@ -39,34 +39,12 @@ static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug("\ttype: %d\n", kimage->type);
pr_debug("\tstart: %lx\n", kimage->start);
pr_debug("\thead: %lx\n", kimage->head);
pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug("\t segment[%lu]: %016lx - %016lx", i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz);
pr_debug("\t\t0x%lx bytes, %lu pages\n",
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
kexec_image_info(kimage);
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;

View File

@ -143,7 +143,7 @@ int load_other_segments(struct kimage *image,
unsigned long initrd_load_addr = 0;
unsigned long orig_segments = image->nr_segments;
char *modified_cmdline = NULL;
struct kexec_buf kbuf;
struct kexec_buf kbuf = {};
kbuf.image = image;
/* Don't allocate anything below the kernel */

View File

@ -13,7 +13,7 @@
void __init memblock_init(void)
{
u32 mem_type;
u64 mem_start, mem_end, mem_size;
u64 mem_start, mem_size;
efi_memory_desc_t *md;
/* Parse memory information */
@ -21,7 +21,6 @@ void __init memblock_init(void)
mem_type = md->type;
mem_start = md->phys_addr;
mem_size = md->num_pages << EFI_PAGE_SHIFT;
mem_end = mem_start + mem_size;
switch (mem_type) {
case EFI_LOADER_CODE:
@ -31,8 +30,6 @@ void __init memblock_init(void)
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
memblock_add(mem_start, mem_size);
if (max_low_pfn < (mem_end >> PAGE_SHIFT))
max_low_pfn = mem_end >> PAGE_SHIFT;
break;
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
@ -49,6 +46,8 @@ void __init memblock_init(void)
}
}
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
/* Reserve the first 2MB */

View File

@ -272,7 +272,8 @@ int __init init_numa_memory(void)
node_mem_init(node);
node_set_online(node);
}
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
setup_nr_node_ids();
loongson_sysconf.nr_nodes = nr_node_ids;
@ -283,26 +284,6 @@ int __init init_numa_memory(void)
#endif
void __init paging_init(void)
{
unsigned int node;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
for_each_online_node(node) {
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
if (end_pfn > max_low_pfn)
max_low_pfn = end_pfn;
}
#ifdef CONFIG_ZONE_DMA32
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_size);
}
int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);

View File

@ -845,13 +845,14 @@ static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config
static int __init init_hw_perf_events(void)
{
int counters;
int bits, counters;
if (!cpu_has_pmp)
return -ENODEV;
pr_info("Performance counters: ");
counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
bits = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
loongarch_pmu.num_counters = counters;
loongarch_pmu.max_period = (1ULL << 63) - 1;
@ -867,7 +868,7 @@ static int __init init_hw_perf_events(void)
on_each_cpu(reset_counters, NULL, 1);
pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
loongarch_pmu.name, counters, 64);
loongarch_pmu.name, counters, bits);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

View File

@ -294,8 +294,6 @@ static void __init fdt_setup(void)
early_init_dt_scan(fdt_pointer, __pa(fdt_pointer));
early_init_fdt_reserve_self();
max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}
@ -390,7 +388,8 @@ static void __init check_kernel_sections_mem(void)
static void __init arch_mem_init(char **cmdline_p)
{
/* Recalculate max_low_pfn for "mem=xxx" */
max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(PFN_DOWN(HIGHMEM_START), max_pfn);
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");

View File

@ -1131,8 +1131,8 @@ static void configure_exception_vector(void)
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
}
void per_cpu_trap_init(int cpu)

View File

@ -439,7 +439,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
spin_lock_irqsave(&s->lock, flags);
switch (type) {
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
if (val > EIOINTC_ROUTE_MAX_VCPUS)
ret = -EINVAL;
else
s->num_cpu = val;

View File

@ -857,7 +857,7 @@ retry:
if (writeable) {
prot_bits = kvm_pte_mkwriteable(prot_bits);
if (write)
if (write || !kvm_slot_dirty_track_enabled(memslot))
prot_bits = kvm_pte_mkdirty(prot_bits);
}

View File

@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
#include <asm/delay.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
@ -95,6 +96,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
* and set CSR TVAL with -1
*/
write_gcsr_timertick(0);
__delay(2); /* Wait cycles until timer interrupt injected */
/*
* Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear

View File

@ -132,6 +132,9 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
* Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when
* exiting the guest, so that the next time trap into the guest.
* We don't need to deal with PMU CSRs contexts.
*
* Otherwise set the request bit KVM_REQ_PMU to restore guest PMU
* before entering guest VM
*/
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
@ -139,16 +142,12 @@ static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
if (!(val & KVM_PMU_EVENT_ENABLED))
vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
else
kvm_make_request(KVM_REQ_PMU, vcpu);
kvm_restore_host_pmu(vcpu);
}
static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
kvm_make_request(KVM_REQ_PMU, vcpu);
}
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
@ -299,7 +298,10 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
kvm_lose_pmu(vcpu);
if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) {
kvm_lose_pmu(vcpu);
kvm_make_request(KVM_REQ_PMU, vcpu);
}
/* make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
@ -1604,9 +1606,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_restore_timer(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
/* Restore hardware PMU CSRs */
kvm_restore_pmu(vcpu);
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
return 0;

View File

@ -60,7 +60,6 @@ int __ref page_is_ram(unsigned long pfn)
return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
@ -72,7 +71,6 @@ void __init paging_init(void)
free_area_init(max_zone_pfns);
}
#endif /* !CONFIG_NUMA */
void __ref free_initmem(void)
{

View File

@ -6,7 +6,7 @@
#include <asm/io.h>
#include <asm-generic/early_ioremap.h>
void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
void __init __iomem *early_ioremap(phys_addr_t phys_addr, unsigned long size)
{
return ((void __iomem *)TO_CACHE(phys_addr));
}

View File

@ -1542,7 +1542,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
WARN_ON(!list_empty(&stid->sc_cp_list));
if (!list_empty(&stid->sc_cp_list))
nfs4_free_cpntf_statelist(stid->sc_client->net, stid);
kmem_cache_free(stateid_slab, stid);
}
@ -3486,7 +3487,20 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
dprintk("--> %s slot %p\n", __func__, slot);
/*
* RFC 5661 Section 2.10.6.1.2:
*
* Any time SEQUENCE ... returns an error ... [t]he replier MUST NOT
* modify the reply cache entry for the slot whenever an error is
* returned from SEQUENCE ...
*
* Because nfsd4_store_cache_entry is called only by
* nfsd4_sequence_done(), nfsd4_store_cache_entry() is called only
* when a SEQUENCE operation was part of the COMPOUND.
* nfs41_check_op_ordering() ensures SEQUENCE is the first op.
*/
if (resp->opcnt == 1 && resp->cstate.status != nfs_ok)
return;
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
slot->sl_opcnt = resp->opcnt;
@ -4349,6 +4363,36 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
return true;
}
/*
* Note that the response is constructed here both for the case
* of a new SEQUENCE request and for a replayed SEQUENCE request.
* We do not cache SEQUENCE responses as SEQUENCE is idempotent.
*/
static void nfsd4_construct_sequence_response(struct nfsd4_session *session,
struct nfsd4_sequence *seq)
{
struct nfs4_client *clp = session->se_client;
seq->maxslots_response = max(session->se_target_maxslots,
seq->maxslots);
seq->target_maxslots = session->se_target_maxslots;
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
break;
case NFSD4_CB_FAULT:
seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
break;
default:
seq->status_flags = 0;
}
if (!list_empty(&clp->cl_revoked))
seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
if (atomic_read(&clp->cl_admin_revoked))
seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
}
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@ -4398,6 +4442,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("%s: slotid %d\n", __func__, seq->slotid);
trace_nfsd_slot_seqid_sequence(clp, seq, slot);
nfsd4_construct_sequence_response(session, seq);
status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
@ -4495,23 +4542,6 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
out:
seq->maxslots = max(session->se_target_maxslots, seq->maxslots);
seq->target_maxslots = session->se_target_maxslots;
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
break;
case NFSD4_CB_FAULT:
seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
break;
default:
seq->status_flags = 0;
}
if (!list_empty(&clp->cl_revoked))
seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
if (atomic_read(&clp->cl_admin_revoked))
seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
trace_nfsd_seq4_status(rqstp, seq);
out_no_session:
if (conn)

View File

@ -5073,7 +5073,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr;
/* Note slotid's are numbered from zero: */
/* sr_highest_slotid */
nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots_response - 1);
if (nfserr != nfs_ok)
return nfserr;
/* sr_target_highest_slotid */
@ -5925,8 +5925,7 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
*/
warn_on_nonidempotent_op(op);
xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
}
if (so) {
} else if (so) {
int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;

View File

@ -458,6 +458,7 @@ enum {
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
FATTR4_WORD2_MODE_UMASK | \
FATTR4_WORD2_CLONE_BLKSIZE | \
NFSD4_2_SECURITY_ATTRS | \
FATTR4_WORD2_XATTR_SUPPORT | \
FATTR4_WORD2_TIME_DELEG_ACCESS | \

View File

@ -269,9 +269,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
dentry);
}
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
switch (fhp->fh_maxsize) {
case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
@ -293,6 +290,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
goto out;
}
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
return 0;
out:
exp_put(exp);

View File

@ -574,8 +574,9 @@ struct nfsd4_sequence {
struct nfs4_sessionid sessionid; /* request/response */
u32 seqid; /* request/response */
u32 slotid; /* request/response */
u32 maxslots; /* request/response */
u32 maxslots; /* request */
u32 cachethis; /* request */
u32 maxslots_response; /* response */
u32 target_maxslots; /* response */
u32 status_flags; /* response */
};

View File

@ -90,7 +90,7 @@
*/
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define DMA_BIT_MASK(n) GENMASK_ULL(n - 1, 0)
struct dma_iova_state {
dma_addr_t addr;

View File

@ -27,5 +27,6 @@ struct map_benchmark {
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
__u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
__u8 expansion[76]; /* For future use */
};
#endif /* _KERNEL_DMA_BENCHMARK_H */

View File

@ -18,10 +18,9 @@ config SUNRPC_SWAP
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
depends on SUNRPC
depends on SUNRPC && CRYPTO
default y
select SUNRPC_GSS
select CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_HASH
help

View File

@ -298,7 +298,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
-fstrict-flex-arrays=% -fmin-function-alignment=% \
-fzero-init-padding-bits=% -mno-fdpic \
--param=% --param asan-%
--param=% --param asan-% -fno-isolate-erroneous-paths-dereference
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu